Mirror of https://github.com/pytorch/pytorch.git
[2/N] Enable cppcoreguidelines-special-member-functions (#138670)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/138670
Approved by: https://github.com/sraikund16
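For context, cppcoreguidelines-special-member-functions flags types that declare some but not all of their special member functions (the "rule of five"): once a destructor or any copy/move operation is user-declared, the remaining ones should be explicitly defaulted or deleted. Below is a minimal sketch of that pattern, not code from this PR; the FileGuard class is a hypothetical example.

#include <cstdio>

// Because FileGuard declares a destructor, the check requires the copy and
// move constructors and assignment operators to be declared as well. Here
// they are deleted so the owned handle cannot be duplicated or moved away.
class FileGuard {
 public:
  explicit FileGuard(const char* path) : file_(std::fopen(path, "r")) {}
  ~FileGuard() {
    if (file_ != nullptr) {
      std::fclose(file_);
    }
  }

  FileGuard(const FileGuard&) = delete;
  FileGuard(FileGuard&&) = delete;
  FileGuard& operator=(const FileGuard&) = delete;
  FileGuard& operator=(FileGuard&&) = delete;

 private:
  std::FILE* file_ = nullptr;
};

Most of the hunks below apply exactly this recipe to guard- and iterator-like types, either deleting or defaulting the previously missing members.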
@@ -64,8 +64,12 @@ struct strided_tensor_iter_fixed {
   int64_t strides_[N] = {0};

   strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete;
-  void operator=(strided_tensor_iter_fixed const& x) = delete;
-  strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
+  strided_tensor_iter_fixed& operator=(strided_tensor_iter_fixed const& x) =
+      delete;
+  strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) noexcept = default;
+  strided_tensor_iter_fixed& operator=(strided_tensor_iter_fixed&& x) noexcept =
+      default;
+  ~strided_tensor_iter_fixed() noexcept = default;
   strided_tensor_iter_fixed(
       Tensor& tensor,
       [[maybe_unused]] bool sort_strides = false)
@@ -93,8 +97,10 @@ struct strided_tensor_iter {
   std::vector<int64_t> strides_;

   strided_tensor_iter(strided_tensor_iter const&) = delete;
-  void operator=(strided_tensor_iter const& x) = delete;
-  strided_tensor_iter(strided_tensor_iter&&) = default;
+  strided_tensor_iter& operator=(strided_tensor_iter const& x) = delete;
+  strided_tensor_iter(strided_tensor_iter&&) noexcept = default;
+  strided_tensor_iter& operator=(strided_tensor_iter&&) noexcept = default;
+  ~strided_tensor_iter() noexcept = default;
   strided_tensor_iter(Tensor& tensor)
       : data_(tensor.data_ptr<T>()),
         dim_(tensor.ndimension()),
@@ -115,7 +115,10 @@ class ThreadLocal {
   explicit ThreadLocal(Accessor accessor) : accessor_(accessor) {}

   ThreadLocal(const ThreadLocal&) = delete;
+  ThreadLocal(ThreadLocal&&) noexcept = default;
   ThreadLocal& operator=(const ThreadLocal&) = delete;
+  ThreadLocal& operator=(ThreadLocal&&) noexcept = default;
+  ~ThreadLocal() = default;

   Type& get() {
     return *accessor_();
@@ -74,6 +74,8 @@ class C10_API DebugInfoGuard {

   DebugInfoGuard(const DebugInfoGuard&) = delete;
+  DebugInfoGuard(DebugInfoGuard&&) = delete;
   DebugInfoGuard& operator=(const DebugInfoGuard&) = delete;
+  DebugInfoGuard& operator=(DebugInfoGuard&&) = delete;

  private:
   bool active_ = false;
@@ -48,6 +48,9 @@ struct GraphTask : std::enable_shared_from_this<GraphTask> {
   struct Capture {
     Capture(const Capture&) = delete;
+    Capture(Capture&&) = default;
     Capture& operator=(const Capture&) = delete;
+    Capture& operator=(Capture&&) = default;
+    ~Capture() = default;

     Capture(int input_idx, int output_idx)
         : input_idx_(input_idx), output_idx_(output_idx) {}
@@ -777,6 +777,7 @@ CacheNode* _compiled_autograd_impl(
   return cache;
 }

+// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
 struct LockGuardWithErrorLogs {
   LockGuardWithErrorLogs(std::mutex& mtx) : mtx_(mtx) {
     // Note: the standard allows try_lock to fail spuriously during races for
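The NOLINTNEXTLINE comment added in the hunk above silences the check for LockGuardWithErrorLogs rather than declaring all of its special members. A minimal sketch of that suppression pattern, assuming a guard type that only needs a constructor and destructor; the ScopedFlag type is a hypothetical example, not code from this PR:

// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
struct ScopedFlag {
  explicit ScopedFlag(bool& flag) : flag_(flag) {
    flag_ = true;  // set on entry
  }
  ~ScopedFlag() {
    flag_ = false;  // reset on scope exit
  }

 private:
  bool& flag_;
};

The comment suppresses the diagnostic on the line that follows it, which is where clang-tidy reports the class; the alternative is to spell out the remaining members as deleted or defaulted, as the other hunks in this PR do.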
@@ -5,7 +5,6 @@
 #include <cstddef>
 #include <cstdint>
 #include <forward_list>
-#include <new>
 #include <utility>

 #include <c10/macros/Macros.h>
@@ -52,7 +51,10 @@ class AppendOnlyList {

   AppendOnlyList() : buffer_last_{buffer_.before_begin()} {}
   AppendOnlyList(const AppendOnlyList&) = delete;
+  AppendOnlyList(AppendOnlyList&&) = delete;
   AppendOnlyList& operator=(const AppendOnlyList&) = delete;
+  AppendOnlyList& operator=(AppendOnlyList&&) = delete;
+  ~AppendOnlyList() = default;

   size_t size() const {
     return n_blocks_ * ChunkSize - (size_t)(end_ - next_);
@@ -96,8 +96,6 @@ TraceWrapper::TraceWrapper(const int64_t start_time, const std::string& name)
 }
 #endif // USE_KINETO

-TraceWrapper::~TraceWrapper() = default;
-
 activity_t* TraceWrapper::addCPUActivity(
     const std::string& name,
     const libkineto::ActivityType type,
@@ -67,9 +67,6 @@ void addMetadata(
 // Wraps: libkineto::CpuTraceBuffer
 struct TraceWrapper {
   TraceWrapper(const int64_t start_time, const std::string& name);
-  TraceWrapper(TraceWrapper&&) = default;
-  TraceWrapper(const TraceWrapper&) = delete;
-  ~TraceWrapper();

   // The caller is expected to hold a mutex when calling `addCPUActivity`.
   activity_t* addCPUActivity(
@@ -96,8 +93,6 @@ struct TraceWrapper {
 struct ActivityTraceWrapper {
   explicit ActivityTraceWrapper(std::unique_ptr<interface_trace_t>&& trace);
   ActivityTraceWrapper() = default;
-  ActivityTraceWrapper(ActivityTraceWrapper&&) = default;
-  ActivityTraceWrapper(const ActivityTraceWrapper&) = delete;
   explicit operator bool() const;
   void save(const std::string& path);

@@ -1,28 +1,31 @@
 #include <torch/csrc/profiler/stubs/base.h>

 #include <c10/core/Device.h>
 #include <c10/util/Exception.h>
-#include <torch/csrc/profiler/stubs/base.h>
 #include <cstdint>
 #include <functional>

 namespace torch::profiler::impl {

-ProfilerStubs::~ProfilerStubs() = default;
-
 namespace {
 struct DefaultStubs : public ProfilerStubs {
-  DefaultStubs(const char* name) : name_{name} {}
+  explicit DefaultStubs(const char* name) : name_{name} {}

-  void record(c10::DeviceIndex*, ProfilerVoidEventStub*, int64_t*)
-      const override {
+  void record(
+      c10::DeviceIndex* /*device*/,
+      ProfilerVoidEventStub* /*event*/,
+      int64_t* /*cpu_ns*/) const override {
     fail();
   }
-  float elapsed(const ProfilerVoidEventStub*, const ProfilerVoidEventStub*)
-      const override {
+  float elapsed(
+      const ProfilerVoidEventStub* /*event*/,
+      const ProfilerVoidEventStub* /*event2*/) const override {
     fail();
-    return 0.f;
+    return 0.F;
   }
-  void mark(const char*) const override {
+  void mark(const char* /*name*/) const override {
     fail();
   }
-  void rangePush(const char*) const override {
+  void rangePush(const char* /*name*/) const override {
     fail();
   }
   void rangePop() const override {
@@ -31,7 +34,7 @@ struct DefaultStubs : public ProfilerStubs {
   bool enabled() const override {
     return false;
   }
-  void onEachDevice(std::function<void(int)>) const override {
+  void onEachDevice(std::function<void(int)> /*op*/) const override {
     fail();
   }
   void synchronize() const override {
@@ -33,7 +33,7 @@ struct TORCH_API ProfilerStubs {
   }
   virtual void onEachDevice(std::function<void(int)> op) const = 0;
   virtual void synchronize() const = 0;
-  virtual ~ProfilerStubs();
+  virtual ~ProfilerStubs() = default;
 };

 TORCH_API void registerCUDAMethods(ProfilerStubs* stubs);
@@ -41,6 +41,10 @@ struct Communicate {
       err_ = std::make_unique<std::ostream>(errbuf_.get());
     }
   }
+  Communicate(const Communicate&) = delete;
+  Communicate(Communicate&&) = delete;
+  Communicate& operator=(const Communicate&) = delete;
+  Communicate& operator=(Communicate&&) = delete;
   ~Communicate() {
     close(inpipe_[1]);
     close(outpipe_[0]);
@@ -81,7 +81,9 @@ struct MemFile {
   }

   MemFile(const MemFile&) = delete;
+  MemFile(MemFile&&) = delete;
   MemFile& operator=(const MemFile&) = delete;
+  MemFile& operator=(MemFile&&) = delete;
   [[nodiscard]] const char* data() const {
     return (const char*)mem_;
   }
@@ -2,7 +2,6 @@
 #include <c10/util/Exception.h>
 #include <torch/csrc/profiler/unwind/unwind.h>
 #include <torch/csrc/utils/cpp_stacktraces.h>
-#include <unordered_map>

 #if !defined(__linux__) || !defined(__x86_64__) || !defined(__has_include) || \
     !__has_include("ext/stdio_filebuf.h")
@@ -66,6 +65,10 @@ struct UpgradeExclusive {
     rdlock_.unlock();
     rdlock_.mutex()->lock();
   }
+  UpgradeExclusive(const UpgradeExclusive&) = delete;
+  UpgradeExclusive(UpgradeExclusive&&) = delete;
+  UpgradeExclusive& operator=(const UpgradeExclusive&) = delete;
+  UpgradeExclusive& operator=(UpgradeExclusive&&) = delete;
   ~UpgradeExclusive() {
     rdlock_.mutex()->unlock();
     rdlock_.lock();
@@ -116,6 +116,7 @@ struct Option {
   Option(Option&& other) noexcept = default;
   Option& operator=(const Option&) = delete;
   Option& operator=(Option&&) = delete;
+  ~Option() = default;

   std::vector<Argument> arguments;
   bool is_variadic;
@@ -97,6 +97,10 @@ struct EnableHermeticPyObject {
     c10::impl::tls_set_dispatch_key_included(
         at::DispatchKey::PythonTLSSnapshot, old_python_snapshot_);
   }
+  EnableHermeticPyObject(const EnableHermeticPyObject&) = delete;
+  EnableHermeticPyObject(EnableHermeticPyObject&&) = delete;
+  EnableHermeticPyObject& operator=(const EnableHermeticPyObject&) = delete;
+  EnableHermeticPyObject& operator=(EnableHermeticPyObject&&) = delete;
   bool old_;
   bool old_excluded_python_;
   bool old_python_;
@@ -853,6 +853,14 @@ class CheckSparseTensorInvariantsContext {
   ~CheckSparseTensorInvariantsContext() {
     at::globalContext().setCheckSparseTensorInvariants(state);
   }
+  CheckSparseTensorInvariantsContext(
+      const CheckSparseTensorInvariantsContext&) = delete;
+  CheckSparseTensorInvariantsContext(CheckSparseTensorInvariantsContext&&) =
+      delete;
+  CheckSparseTensorInvariantsContext& operator=(
+      const CheckSparseTensorInvariantsContext&) = delete;
+  CheckSparseTensorInvariantsContext& operator=(
+      CheckSparseTensorInvariantsContext&&) = delete;

  private:
   bool state;
@@ -27,6 +27,12 @@ struct StashTorchDispatchModeGuard {
         std::move(saved_mode_));
     }
   }
+  StashTorchDispatchModeGuard(const StashTorchDispatchModeGuard&) = delete;
+  StashTorchDispatchModeGuard(StashTorchDispatchModeGuard&&) = delete;
+  StashTorchDispatchModeGuard& operator=(const StashTorchDispatchModeGuard&) =
+      delete;
+  StashTorchDispatchModeGuard& operator=(StashTorchDispatchModeGuard&&) =
+      delete;

   const std::shared_ptr<c10::impl::PyObject_TorchDispatchMode>& get_cur_mode() {
     return saved_mode_;
@@ -44,6 +50,12 @@ struct StashTorchDispatchStackGuard {
     c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
     saved_state_ = std::move(old);
   }
+  StashTorchDispatchStackGuard(const StashTorchDispatchStackGuard&) = delete;
+  StashTorchDispatchStackGuard(StashTorchDispatchStackGuard&&) = delete;
+  StashTorchDispatchStackGuard& operator=(const StashTorchDispatchStackGuard&) =
+      delete;
+  StashTorchDispatchStackGuard& operator=(StashTorchDispatchStackGuard&&) =
+      delete;

   ~StashTorchDispatchStackGuard() {
     c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));