Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[BE] Enforce missing override keyword (#104032)
This PR enables `-Winconsistent-missing-destructor-override` and `-Winconsistent-missing-override` and fixes violations.

### 🤖 Generated by Copilot at 47e904e

This pull request updates various classes and operators in the `caffe2` and `aten` subdirectories to use the `override` specifier instead of the `virtual` keyword for destructors and other virtual functions that override a base-class function. This improves code readability, quality, and consistency with C++ best practices. It also modifies `./CMakeLists.txt` to enable warnings for these specifiers, but not to promote them to errors.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/104032
Approved by: https://github.com/malfet
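For context, here is a minimal sketch of what the newly enabled Clang warnings catch (the `Base`/`Before`/`After` names are illustrative, not taken from the patch): `-Winconsistent-missing-override` fires when a member function overrides a base-class function without `override` while the class already uses the keyword elsewhere, and `-Winconsistent-missing-destructor-override` does the same for destructors.

```cpp
struct Base {
  virtual ~Base() = default;
  virtual void run();
  virtual void stop();
};

// Before: the destructor and run() override Base members without `override`,
// while stop() in the same class spells it out, so clang emits
// -Winconsistent-missing-destructor-override and
// -Winconsistent-missing-override respectively.
struct Before : Base {
  ~Before() {}
  void run();
  void stop() override;
};

// After: the pattern applied throughout this PR -- every overrider is marked
// `override` (and the redundant `virtual` is dropped), which silences both
// warnings.
struct After : Base {
  ~After() override = default;
  void run() override;
  void stop() override;
};
```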
@@ -817,6 +817,8 @@ if(NOT MSVC)
 append_cxx_flag_if_supported("-Wno-strict-aliasing" CMAKE_CXX_FLAGS)
 append_cxx_flag_if_supported("-Wvla-extension" CMAKE_CXX_FLAGS)
 append_cxx_flag_if_supported("-Wnewline-eof" CMAKE_CXX_FLAGS)
+append_cxx_flag_if_supported("-Winconsistent-missing-override" CMAKE_CXX_FLAGS)
+append_cxx_flag_if_supported("-Winconsistent-missing-destructor-override" CMAKE_CXX_FLAGS)
 if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
   string(APPEND CMAKE_CXX_FLAGS " -Wno-range-loop-analysis")
   string(APPEND CMAKE_CXX_FLAGS " -Wno-pass-failed")
@@ -861,6 +863,8 @@ if(NOT MSVC)

 append_cxx_flag_if_supported("-Wno-error=pedantic" CMAKE_CXX_FLAGS)
 append_cxx_flag_if_supported("-Wno-error=old-style-cast" CMAKE_CXX_FLAGS)
+append_cxx_flag_if_supported("-Wno-error=inconsistent-missing-override" CMAKE_CXX_FLAGS)
+append_cxx_flag_if_supported("-Wno-error=inconsistent-missing-destructor-override" CMAKE_CXX_FLAGS)
 append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS)
 append_cxx_flag_if_supported("-Wno-invalid-partial-specialization" CMAKE_CXX_FLAGS)
 append_cxx_flag_if_supported("-Wno-unused-private-field" CMAKE_CXX_FLAGS)
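A note on the second CMake hunk (standard GCC/Clang flag semantics, not code from this patch): pairing `-W<warning>` with `-Wno-error=<warning>` keeps the diagnostic visible but stops it from failing builds that otherwise compile with `-Werror`, which matches the commit message's intent of enabling the warnings without turning them into errors.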
@@ -19,8 +19,7 @@ constexpr auto kCustomRNG = DispatchKey::CustomRNGKeyId;

 struct TestCPUGenerator : public c10::GeneratorImpl {
 TestCPUGenerator(uint64_t value) : GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(kCustomRNG)}, value_(value) { }
-// NOLINTNEXTLINE(modernize-use-override)
-~TestCPUGenerator() = default;
+~TestCPUGenerator() override = default;
 uint32_t random() { return value_; }
 uint64_t random64() { return value_; }
 c10::optional<float> next_float_normal_sample() { return next_float_normal_sample_; }

@@ -44,7 +44,7 @@ class AllgatherOp final : public Operator<Context> {
 }
 }

-virtual ~AllgatherOp() {}
+~AllgatherOp() override {}

 bool RunOnDevice() override {
 std::call_once(once_, [&] { initialize(); });

@@ -32,7 +32,7 @@ class AllreduceOp final : public Operator<Context> {
 }
 }

-virtual ~AllreduceOp() {}
+~AllreduceOp() override {}

 bool RunOnDevice() override {
 std::call_once(once_, [&] { initialize(); });

@@ -24,7 +24,7 @@ class BarrierOp final : public Operator<Context> {
 }
 }

-virtual ~BarrierOp() {}
+~BarrierOp() override {}

 bool RunOnDevice() override {
 auto context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);

@@ -29,7 +29,7 @@ class BroadcastOp final : public Operator<Context> {
 }
 }

-virtual ~BroadcastOp() {}
+~BroadcastOp() override {}

 bool RunOnDevice() override {
 std::call_once(once_, [&] { initialize(); });
@@ -49,7 +49,7 @@ class CreateCommonWorld final : public Operator<Context> {
 initialize();
 }

-virtual ~CreateCommonWorld() {
+~CreateCommonWorld() override {
 }

 CommonWorld rendezvousWithMPI() {

@@ -176,7 +176,7 @@ class CloneCommonWorld final : public Operator<Context> {
 }
 }

-virtual ~CloneCommonWorld() {}
+~CloneCommonWorld() override {}

 bool RunOnDevice() override {
 try {

@@ -44,7 +44,7 @@ class ReduceScatterOp final : public Operator<Context> {
 }
 }

-virtual ~ReduceScatterOp() {}
+~ReduceScatterOp() override {}

 bool RunOnDevice() override {
 std::call_once(once_, [&] { initialize(); });

@@ -12,7 +12,7 @@ class TORCH_API StoreHandlerWrapper : public ::gloo::rendezvous::Store {
 public:
 explicit StoreHandlerWrapper(StoreHandler& handler) : handler_(handler) {}

-virtual ~StoreHandlerWrapper() {}
+virtual ~StoreHandlerWrapper() override {}

 virtual void set(const std::string& key, const std::vector<char>& data)
 override;

@@ -168,7 +168,7 @@ class TORCH_API Tensor : public NeuralNetData {
 return D->getKind() == NNDataKind::Tensor;
 }

-NeuralNetData* clone() {
+NeuralNetData* clone() override {
 return new Tensor(name_);
 }

@@ -188,7 +188,7 @@ class TORCH_API Tensor : public NeuralNetData {
 name_ = name;
 }

-~Tensor() {}
+~Tensor() override {}

 private:
 std::string name_;
@@ -1157,7 +1157,7 @@ class Operator : public OperatorBase {
 template <class... Args> \
 explicit name(Args&&... args) \
 : Operator<Context>(std::forward<Args>(args)...) {} \
-virtual ~name() noexcept {}
+virtual ~name() noexcept override {}

 // Helpers to implement runtime op polymorphism. Often it's convenient to make
 // an op work on different input types (e.g. i32 vs i64 indices) or special-case

@@ -12,7 +12,7 @@ template <class Context>
 class QTensorSerializer : public BlobSerializerBase {
 public:
 QTensorSerializer() : context_() {}
-~QTensorSerializer() {}
+~QTensorSerializer() override {}
 /**
 * Serializes a Blob. Note that this blob has to contain QTensor<Context>.
 */

@@ -7,7 +7,7 @@ namespace caffe2 {
 class TORCH_API FileStoreHandler : public StoreHandler {
 public:
 explicit FileStoreHandler(const std::string& path, const std::string& prefix);
-virtual ~FileStoreHandler();
+~FileStoreHandler() override;

 void set(const std::string& name, const std::string& data) override;
@@ -30,8 +30,8 @@ class IDEEPConvOp : public IDEEPConvPoolOpBase {
 algo_ = ialgo::convolution_winograd;
 }
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPConvOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPConvOp() override {}

 bool RunOnDeviceWithOrderNCHW() override {
 const auto& X = Input(INPUT_X);

@@ -199,8 +199,8 @@ class IDEEPConvFusionOp final : public IDEEPConvOp {
 CAFFE_THROW("Unsupported conv fusion type!");
 }
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPConvFusionOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPConvFusionOp() override {}
 };

 const char* kConvFusionDoc = R"DOC(

@@ -291,8 +291,8 @@ class IDEEPConvGradientOp final : public IDEEPConvPoolOpBase {
 "In order to backward propagate weights correctly, "
 "please set training_mode=1");
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPConvGradientOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPConvGradientOp() override {}

 bool RunOnDeviceWithOrderNCHW() override {
 const auto& X = Input(INPUT);

@@ -12,7 +12,7 @@ class IDEEPConvPoolOpBase : public ConvPoolOpBase<IDEEPContext> {
 public:
 IDEEPConvPoolOpBase(const OperatorDef& operator_def, Workspace* ws)
 : ConvPoolOpBase<IDEEPContext>(operator_def, ws) {}
-virtual ~IDEEPConvPoolOpBase() {}
+~IDEEPConvPoolOpBase() override {}

 inline const ideep::tensor& Input(int index) {
 return OperatorBase::template Input<ideep::tensor>(index);

@@ -116,7 +116,7 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
 CAFFE_ENFORCE_LE(adj_[dim], stride_[dim]);
 }
 }
-virtual ~IDEEPConvTransposeUnpoolBase() {}
+~IDEEPConvTransposeUnpoolBase() override {}

 const ideep::tensor& Input(int index) {
 return OperatorBase::template Input<ideep::tensor>(index);

@@ -25,8 +25,8 @@ class IDEEPInt8SumReluOp final : public IDEEPOperator {

 Y_scales_ = ConvertScales({scale_});
 }
-// NOLINTNEXTLINE(modernize-use-equals-default,modernize-use-override)
-virtual ~IDEEPInt8SumReluOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8SumReluOp() override {}

 bool RunOnDevice() override {
 itensor temp_ten;
@@ -28,8 +28,8 @@ class IDEEPInt8ConvOp : public IDEEPConvPoolOpBase {
 CAFFE_ENFORCE(zero_point_ == 128 || zero_point_ == 0);
 Y_scales_ = ConvertScales({scale_});
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPInt8ConvOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8ConvOp() override {}

 bool RunOnDeviceWithOrderNCHW() override {
 const auto &X = Input(INPUT_X);

@@ -198,8 +198,8 @@ class IDEEPInt8ConvReluOp final : public IDEEPInt8ConvOp {
 attr_ = iattr::fuse_relu();
 fusion_type_ = FUSION_CONV_RELU;
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPInt8ConvReluOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8ConvReluOp() override {}
 };

 class IDEEPInt8ConvSumOp final : public IDEEPInt8ConvOp {

@@ -213,8 +213,8 @@ class IDEEPInt8ConvSumOp final : public IDEEPInt8ConvOp {
 attr_ = iattr::fuse_sum();
 fusion_type_ = FUSION_CONV_SUM;
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPInt8ConvSumOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8ConvSumOp() override {}
 };

 class IDEEPInt8ConvSumReluOp final : public IDEEPInt8ConvOp {

@@ -228,8 +228,8 @@ class IDEEPInt8ConvSumReluOp final : public IDEEPInt8ConvOp {
 attr_ = iattr::residual();
 fusion_type_ = FUSION_CONV_SUM_RELU;
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPInt8ConvSumReluOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8ConvSumReluOp() override {}
 };

 REGISTER_IDEEP_OPERATOR_WITH_ENGINE(Int8Conv, DNNLOWP, IDEEPInt8ConvOp);

@@ -18,8 +18,8 @@ class IDEEPInt8DequantizeOp final : public IDEEPOperator {
 static_cast<int>(iformat::nchw)));
 }
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPInt8DequantizeOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8DequantizeOp() override {}

 bool RunOnDevice() override {
 const auto& X = Input(0);

@@ -26,8 +26,8 @@ public:
 }
 Y_scales_ = ConvertScales({scale_});
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPInt8FullyConnectedOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8FullyConnectedOp() override {}

 bool RunOnDevice() override {
 const auto& X = Input(INPUT);

@@ -26,8 +26,8 @@ class IDEEPInt8QuantizeOp final : public IDEEPOperator {
 Y_data_type_ = zero_point_ == 0 ? idtype::u8 : idtype::s8;
 Y_scales_ = ConvertScales({scale_});
 }
-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~IDEEPInt8QuantizeOp() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~IDEEPInt8QuantizeOp() override {}

 bool RunOnDevice() override {
 const auto& X = Input(0);
@@ -23,20 +23,20 @@ class IDEEPContext final : public BaseContext {

 ~IDEEPContext() noexcept override {}

-inline void SwitchToDevice(int64_t /*stream_id*/) {}
+inline void SwitchToDevice(int64_t /*stream_id*/) override {}
 using BaseContext::SwitchToDevice;

-inline void WaitEvent(const Event& ev) {
+inline void WaitEvent(const Event& ev) override {
 ev.Wait(IDEEP, this);
 }

-inline void Record(Event* ev, const char* err_msg = nullptr) const {
+inline void Record(Event* ev, const char* err_msg = nullptr) const override {
 CAFFE_ENFORCE(ev, "Event must not be null.");
 ev->Record(IDEEP, this, err_msg);
 }

-inline void FinishDeviceComputation() {}
+inline void FinishDeviceComputation() override {}

 inline rand_gen_type& RandGenerator() {
 if (!random_generator_.get()) {

@@ -40,7 +40,7 @@ class IDEEPOperator : public OperatorBase {
 order_(StringToStorageOrder(
 OperatorBase::GetSingleArgument<string>("order", "NCHW"))) {
 }
-virtual ~IDEEPOperator() {}
+~IDEEPOperator() override {}

 inline const ideep::tensor& Input(int index) {
 return OperatorBase::template Input<ideep::tensor>(index);

@@ -114,7 +114,7 @@ class IDEEPOperator : public OperatorBase {
 #define USE_SIMPLE_IDEEP_CTOR_DTOR(name) \
 name(const OperatorDef& operator_def, Workspace* ws) \
 : IDEEPOperator(operator_def, ws) {} \
-virtual ~name() {}
+~name() override {}

 // Convert zero_point scales to min_max scales
 // NOTE:
@@ -31,7 +31,7 @@ class MPIBroadcastOp final : public Operator<Context> {
 MPIBroadcastOp(const OperatorDef& operator_def, Workspace* ws)
 : Operator<Context>(operator_def, ws),
 root_(OperatorBase::template GetSingleArgument<int>("root", 0)) {}
-~MPIBroadcastOp() {}
+~MPIBroadcastOp() override {}

 bool RunOnDevice() override {
 MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();

@@ -65,7 +65,7 @@ class MPIReduceOp final : public Operator<Context> {
 MPIReduceOp(const OperatorDef& operator_def, Workspace* ws)
 : Operator<Context>(operator_def, ws),
 root_(OperatorBase::template GetSingleArgument<int>("root", 0)) {}
-~MPIReduceOp() {}
+~MPIReduceOp() override {}

 bool RunOnDevice() override {
 MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();

@@ -50,7 +50,7 @@ class BatchGatherGradientOp final : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...),
 OP_SINGLE_ARG(int, "axis", axis_, 1),
 OP_SINGLE_ARG(bool, "match_outer", match_outer_, false) {}
-virtual ~BatchGatherGradientOp() noexcept {}
+~BatchGatherGradientOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(

@@ -19,7 +19,7 @@ class BatchPermutationOp final : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...) {}
 USE_OPERATOR_CONTEXT_FUNCTIONS;

-bool RunOnDevice();
+bool RunOnDevice() override;
 };

 template <typename T, class Context>

@@ -29,7 +29,7 @@ class BatchPermutationGradientOp final : public Operator<Context> {
 : Operator<Context>(def, ws) {}
 USE_OPERATOR_CONTEXT_FUNCTIONS;

-bool RunOnDevice();
+bool RunOnDevice() override;
 };

 } // namespace caffe2

@@ -125,7 +125,7 @@ class BatchDenseToSparseOp : public Operator<Context> {
 template <class... Args>
 explicit BatchDenseToSparseOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...) {}
-bool RunOnDevice() {
+bool RunOnDevice() override{
 return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
 this, Input(LENGTHS));
 }

@@ -58,7 +58,7 @@ class BoxWithNMSLimitOp final : public Operator<Context> {
 input_scores_fg_cls_starting_id_ = (int)input_boxes_include_bg_cls_;
 }

-~BoxWithNMSLimitOp() {}
+~BoxWithNMSLimitOp() override {}

 bool RunOnDevice() override {
 if (InputSize() > 2) {
@@ -14,7 +14,7 @@ class ChannelBackpropStatsOp : public Operator<Context> {
 template <class... Args>
 explicit ChannelBackpropStatsOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...) {}
-~ChannelBackpropStatsOp() {}
+~ChannelBackpropStatsOp() override {}

 bool RunOnDevice() override {
 return true;

@@ -29,7 +29,7 @@ class ConvOp final : public ConvPoolOpBase<Context> {
 createSharedBuffer<Context>(ws_);
 }
 }
-~ConvOp() {}
+~ConvOp() override {}

 bool RunOnDeviceWithOrderNCHW() override;
 bool RunOnDeviceWithOrderNHWC() override;

@@ -79,7 +79,7 @@ class ConvGradientOp final : public ConvPoolOpBase<Context> {
 std::is_same<Context, CPUContext>::value),
 "Group convolution only supports NCHW order or CPUContext right now.");
 }
-~ConvGradientOp() {}
+~ConvGradientOp() override {}

 bool RunOnDeviceWithOrderNCHW() override;
 bool RunOnDeviceWithOrderNHWC() override;

@@ -651,7 +651,7 @@ class ConvPoolOpBase : public Operator<Context> {
 return TensorInferenceForSchema(def, in, in[1].dims(img_ndim));
 }

-virtual ~ConvPoolOpBase() {}
+~ConvPoolOpBase() override {}

 protected:
 LegacyPadding legacy_pad_;

@@ -208,7 +208,7 @@ class ConvTransposeUnpoolBase : public Operator<Context> {
 CAFFE_THROW("Not implemented");
 }

-virtual ~ConvTransposeUnpoolBase() {}
+~ConvTransposeUnpoolBase() override {}

 protected:
 // Accessors for 2D conv params.

@@ -139,7 +139,7 @@ class IsMemberOfOp final : public Operator<Context> {
 CAFFE_THROW("Unexpected 'dtype' argument value: ", dtype);
 }
 }
-virtual ~IsMemberOfOp() noexcept {}
+~IsMemberOfOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<
@@ -17,7 +17,7 @@ class MergeDenseFeatureTensorsOp : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...) {
 featureIDs_ = this->template GetRepeatedArgument<int64_t>("feature_ids");
 }
-virtual ~MergeDenseFeatureTensorsOp() noexcept {}
+~MergeDenseFeatureTensorsOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -83,7 +83,7 @@ class MergeSingleScalarFeatureTensorsOp : public Operator<Context> {
 numInputs_ = InputSize() / kNumTensorsPerInput;
 featureIDs_ = this->template GetRepeatedArgument<int64_t>("feature_ids");
 }
-virtual ~MergeSingleScalarFeatureTensorsOp() noexcept {}
+~MergeSingleScalarFeatureTensorsOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -148,7 +148,7 @@ class MergeSingleScalarFeatureTensorsGradientOp : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...) {
 numFeatureInputs_ = InputSize() - 1; // Everything other than values_grad
 }
-virtual ~MergeSingleScalarFeatureTensorsGradientOp() noexcept {}
+~MergeSingleScalarFeatureTensorsGradientOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -198,7 +198,7 @@ class MergeSingleListFeatureTensorsOp : public Operator<Context> {
 inValuesOffset_.resize(numInputs_);
 featureIDs_ = this->template GetRepeatedArgument<int64_t>("feature_ids");
 }
-virtual ~MergeSingleListFeatureTensorsOp() noexcept {}
+~MergeSingleListFeatureTensorsOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -284,7 +284,7 @@ class MergeSingleListOrMapFeatureTensorsGradientOp : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...) {
 numFeatureInputs_ = (InputSize() - 1) / kNumTensorsPerInput;
 }
-virtual ~MergeSingleListOrMapFeatureTensorsGradientOp() noexcept {}
+~MergeSingleListOrMapFeatureTensorsGradientOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -352,7 +352,7 @@ class MergeSingleMapFeatureTensorsOp : public Operator<Context> {
 inValuesOffset_.resize(numInputs_);
 featureIDs_ = this->template GetRepeatedArgument<int64_t>("feature_ids");
 }
-virtual ~MergeSingleMapFeatureTensorsOp() noexcept {}
+~MergeSingleMapFeatureTensorsOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -454,7 +454,7 @@ class MergeMultiScalarFeatureTensorsOp : public Operator<Context> {
 numInputs_ = InputSize() / kNumTensorsPerInput;
 inKeysOffset_.resize(numInputs_);
 }
-virtual ~MergeMultiScalarFeatureTensorsOp() noexcept {}
+~MergeMultiScalarFeatureTensorsOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -526,7 +526,7 @@ class MergeMultiScalarFeatureTensorsGradientOp : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...) {
 numFeatureInputs_ = (InputSize() - 1) / kNumTensorsPerInput;
 }
-virtual ~MergeMultiScalarFeatureTensorsGradientOp() noexcept {}
+~MergeMultiScalarFeatureTensorsGradientOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -588,7 +588,7 @@ class MergeMultiListFeatureTensorsOp : public Operator<Context> {
 inKeysOffset_.resize(numInputs_);
 inValuesValuesOffset_.resize(numInputs_);
 }
-virtual ~MergeMultiListFeatureTensorsOp() noexcept {}
+~MergeMultiListFeatureTensorsOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -680,7 +680,7 @@ class MergeMultiMapFeatureTensorsOp : public Operator<Context> {
 inKeysOffset_.resize(numInputs_);
 inValuesValuesOffset_.resize(numInputs_);
 }
-virtual ~MergeMultiMapFeatureTensorsOp() noexcept {}
+~MergeMultiMapFeatureTensorsOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<

@@ -786,7 +786,7 @@ class MergeMultiListOrMapFeatureTensorsGradientOp : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...) {
 numFeatureInputs_ = (InputSize() - 1) / kNumTensorsPerInput;
 }
-virtual ~MergeMultiListOrMapFeatureTensorsGradientOp() noexcept {}
+~MergeMultiListOrMapFeatureTensorsGradientOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<
@@ -49,7 +49,7 @@ class FillerOp : public Operator<Context> {
 }
 }

-virtual ~FillerOp() {}
+~FillerOp() override {}
 USE_OPERATOR_CONTEXT_FUNCTIONS;

 bool RunOnDevice() override {

@@ -21,7 +21,7 @@ class FindOp final : public Operator<Context> {
 USE_OPERATOR_CONTEXT_FUNCTIONS;
 USE_DISPATCH_HELPER;

-bool RunOnDevice() {
+bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<int, long>>::call(this, Input(0));
 }

@@ -24,7 +24,7 @@ class FullyConnectedOp final : public Operator<Context> {
 axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
 float16_compute_(
 this->template GetSingleArgument<bool>("float16_compute", false)) {}
-~FullyConnectedOp() {}
+~FullyConnectedOp() override {}

 template <
 typename T_X,

@@ -175,7 +175,7 @@ class FullyConnectedGradientOp : public Operator<Context> {
 axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
 float16_compute_(
 this->template GetSingleArgument<bool>("float16_compute", false)) {}
-~FullyConnectedGradientOp() {}
+~FullyConnectedGradientOp() override {}

 template <
 typename T_X,

@@ -45,7 +45,7 @@ class FloatToFusedRandRowwiseQuantizedOp : public Operator<Context> {
 }
 }

-~FloatToFusedRandRowwiseQuantizedOp() {
+~FloatToFusedRandRowwiseQuantizedOp() override {
 if (random_) {
 #ifdef FUSED_ROWWISE_RANDOM_QUANTIZATION_USE_MKL
 int status = vslDeleteStream(&vslStream_);

@@ -194,7 +194,7 @@ class GatherOp : public Operator<Context> {
 }
 }

-virtual ~GatherOp() noexcept {}
+~GatherOp() noexcept override {}

 bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(

@@ -103,7 +103,7 @@ class GenerateProposalsOp final : public Operator<Context> {
 legacy_plus_one_(
 this->template GetSingleArgument<bool>("legacy_plus_one", true)) {}

-~GenerateProposalsOp() {}
+~GenerateProposalsOp() override {}

 bool RunOnDevice() override;

@@ -15,7 +15,7 @@ class GluOp final : public Operator<Context> {

 USE_OPERATOR_CONTEXT_FUNCTIONS;

-bool RunOnDevice() {
+bool RunOnDevice() override {
 auto& X = Input(0);

 vector<int64_t> Yshape;

@@ -37,7 +37,7 @@ class Float16ConstantFillOp : public Operator<CPUContext> {
 shape_(this->template GetRepeatedArgument<int64_t>("shape")) {}

 USE_OPERATOR_FUNCTIONS(CPUContext);
-virtual ~Float16ConstantFillOp() {}
+~Float16ConstantFillOp() override {}

 bool RunOnDevice() override;
@@ -68,7 +68,7 @@ class Float16UniformFillOp : public Operator<Context> {
 }

 USE_OPERATOR_CONTEXT_FUNCTIONS;
-virtual ~Float16UniformFillOp() {}
+~Float16UniformFillOp() override {}

 bool RunOnDevice() override;

@@ -27,7 +27,7 @@ class InstanceNormOp final : public Operator<Context> {
 "order should be either \"NCHW\" or \"NHWC\".");
 }

-bool RunOnDevice() {
+bool RunOnDevice() override {
 const auto& X = Input(INPUT);
 const auto& gamma = Input(SCALE);
 const auto& beta = Input(BIAS);

@@ -144,7 +144,7 @@ class InstanceNormGradientOp final : public Operator<Context> {
 "order should be either \"NCHW\" or \"NHWC\".");
 }

-bool RunOnDevice() {
+bool RunOnDevice() override {
 const auto& X = Input(INPUT);
 const auto& gamma = Input(SCALE);
 const auto& dY = Input(OUTPUT_GRAD);

@@ -29,7 +29,7 @@ class LengthsSplitOp final : public Operator<Context> {
 "`n_split` must contain a positive value for defined behavior.");
 }
 }
-~LengthsSplitOp() {}
+~LengthsSplitOp() override {}

 bool RunOnDevice() override {
 const auto& L = Input(0);

@@ -32,7 +32,7 @@ class CPUSparseLengthsReductionOp : public Operator<CPUContext> {
 !(USE_WEIGHT & USE_MEAN), "Cannot both specify weight and mean.");
 }

-~CPUSparseLengthsReductionOp() {}
+~CPUSparseLengthsReductionOp() override {}

 // Currently, we support float and at::Half inputs for input data type, and
 // int32_t and int64_t for the index type.

@@ -287,7 +287,7 @@ class TTSparseLengthsSumOp final : public Operator<Context> {
 }
 }

-~TTSparseLengthsSumOp() {}
+~TTSparseLengthsSumOp() override {}

 void Ind2Sub(int64_t* out_factor_index, const int64_t* indices, int len) {
 // TODO: vectorization

@@ -492,7 +492,7 @@ class TTSparseLengthsSumGradientOp final : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...) {}
 bool RunOnDevice() override;

-~TTSparseLengthsSumGradientOp() {}
+~TTSparseLengthsSumGradientOp() override {}
 };

 // implement the gradient op for TTLengthSumGradient op
@@ -26,7 +26,7 @@ class LocallyConnectedOp final : public ConvPoolOpBase<Context> {
 "Group locally connected only supports NCHW order right now.");
 }

-~LocallyConnectedOp() = default;
+~LocallyConnectedOp() override = default;

 bool RunOnDeviceWithOrderNCHW() override;
 bool RunOnDeviceWithOrderNHWC() override;

@@ -81,7 +81,7 @@ class LocallyConnectedGradientOp final : public ConvPoolOpBase<Context> {
 "Group locally connected only supports NCHW order right now.");
 }

-~LocallyConnectedGradientOp() = default;
+~LocallyConnectedGradientOp() override = default;

 bool RunOnDeviceWithOrderNCHW() override;
 bool RunOnDeviceWithOrderNHWC() override;

@@ -32,7 +32,7 @@ class LogitGradientOp final : public Operator<Context> {
 explicit LogitGradientOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...),
 eps_(this->template GetSingleArgument<float>("eps", 1e-6f)) {}
-~LogitGradientOp() {}
+~LogitGradientOp() override {}

 bool RunOnDevice() override;

@@ -52,7 +52,7 @@ class CreateMapOp final : public Operator<Context> {
 template <class... Args>
 explicit CreateMapOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...) {}
-~CreateMapOp() {}
+~CreateMapOp() override {}

 bool RunOnDevice() override {
 TensorProto::DataType key_dtype = static_cast<TensorProto::DataType>(

@@ -104,7 +104,7 @@ class KeyValueToMapOp final : public Operator<Context> {
 template <class... Args>
 explicit KeyValueToMapOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...) {}
-~KeyValueToMapOp() {}
+~KeyValueToMapOp() override {}

 bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(

@@ -157,7 +157,7 @@ class MapToKeyValueOp final : public Operator<Context> {
 template <class... Args>
 explicit MapToKeyValueOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...) {}
-~MapToKeyValueOp() {}
+~MapToKeyValueOp() override {}

 bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<

@@ -18,7 +18,7 @@ class MatMulOp final : public Operator<Context> {
 axis_b_(this->template GetSingleArgument<int>("axis_b", 1)),
 trans_a_(this->template GetSingleArgument<int>("trans_a", 0)),
 trans_b_(this->template GetSingleArgument<int>("trans_b", 0)) {}
-~MatMulOp() {}
+~MatMulOp() override {}

 bool RunOnDevice() override {
 const auto& A = Input(0);
@@ -122,7 +122,7 @@ class MaxGradientOp final : public SelectGradientOpBase<T, Context> {
 explicit MaxGradientOp(Args&&... args)
 : SelectGradientOpBase<T, Context>(std::forward<Args>(args)...) {}

-~MaxGradientOp() = default;
+~MaxGradientOp() override = default;
 };

 template <typename T, class Context>

@@ -132,7 +132,7 @@ class MinGradientOp final : public SelectGradientOpBase<T, Context> {
 explicit MinGradientOp(Args&&... args)
 : SelectGradientOpBase<T, Context>(std::forward<Args>(args)...) {}

-~MinGradientOp() = default;
+~MinGradientOp() override = default;
 };

 } // namespace caffe2

@@ -18,7 +18,7 @@ class NumpyTileOp : public Operator<Context> {
 template <class... Args>
 explicit NumpyTileOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...) {}
-~NumpyTileOp() {}
+~NumpyTileOp() override {}

 bool RunOnDevice() override {
 const auto& input = Input(0);

@@ -47,7 +47,7 @@ class ONNXWhileOp final : public Operator<Context> {

 USE_OPERATOR_CONTEXT_FUNCTIONS;

-bool RunOnDevice() {
+bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<int, bool, long>>::call(this, Input(1));
 }

@@ -37,7 +37,7 @@ class PackSegmentsOp final : public Operator<Context> {
 }
 }

-bool RunOnDevice() {
+bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<int, long>>::call(this, Input(LENGTHS));
 }

@@ -42,7 +42,7 @@ class PadImageOp final : public ConvPoolOpBase<Context> {
 // output size.
 kernel_.assign(pads_.size() / 2, 1);
 }
-~PadImageOp() {}
+~PadImageOp() override {}

 bool RunOnDeviceWithOrderNCHW() override;
 bool RunOnDeviceWithOrderNHWC() override;

@@ -78,7 +78,7 @@ class PadImageGradientOp final : public ConvPoolOpBase<Context> {
 // output size.
 kernel_.assign(pads_.size() / 2, 1);
 }
-~PadImageGradientOp() {}
+~PadImageGradientOp() override {}

 bool RunOnDeviceWithOrderNCHW() override;
 bool RunOnDeviceWithOrderNHWC() override;

@@ -33,7 +33,7 @@ class PoolOp final : public ConvPoolOpBase<Context> {
 }
 }

-~PoolOp() = default;
+~PoolOp() override = default;

 bool RunOnDeviceWithOrderNCHW() override {
 const auto& X = Input(0);

@@ -112,7 +112,7 @@ class PoolGradientOp final : public ConvPoolOpBase<Context> {
 explicit PoolGradientOp(Args&&... args)
 : ConvPoolOpBase<Context>(std::forward<Args>(args)...), functor_(*this) {}

-~PoolGradientOp() = default;
+~PoolGradientOp() override = default;

 bool RunOnDeviceWithOrderNCHW() override {
 const auto& X = Input(0);
@@ -35,7 +35,7 @@ class PrefetchOperator : public OperatorBase {
 context_.SwitchToDevice();
 }

-virtual ~PrefetchOperator() noexcept {
+~PrefetchOperator() noexcept override {
 CHECK(finalize_ || !prefetch_thread_.get())
 << "YOU MADE A PROGRAMMING ERROR: derived class of PrefetchOperator "
 "should call Finalize() in its destructor so the prefetching "

@@ -108,7 +108,7 @@ class QuantDecodeOp final : public Operator<CPUContext> {
 explicit QuantDecodeOp(Args&&... args)
 : Operator<CPUContext>(std::forward<Args>(args)...) {}

-~QuantDecodeOp() {}
+~QuantDecodeOp() override {}

 bool RunOnDevice() override {
 CAFFE_ENFORCE_GT(InputSize(), 1);

@@ -144,7 +144,7 @@ class QuantDecodeGradientOp final : public Operator<CPUContext> {
 template <class... Args>
 explicit QuantDecodeGradientOp(Args&&... args)
 : Operator<CPUContext>(std::forward<Args>(args)...) {}
-~QuantDecodeGradientOp() {}
+~QuantDecodeGradientOp() override {}

 bool RunOnDevice() override {
 // Inputs: 1 codebook, n tensors of codes, and n corresponding gradients.

@@ -18,7 +18,7 @@ class Int8AddOp final : public Operator<CPUContext> {
 explicit Int8AddOp(const OperatorDef& operator_def, Workspace* ws)
 : Operator<CPUContext>(operator_def, ws), ws_(ws) {}

-~Int8AddOp() {
+~Int8AddOp() override {
 if (this->qnnpackOperator_ != nullptr) {
 qnnp_delete_operator(this->qnnpackOperator_);
 this->qnnpackOperator_ = nullptr;

@@ -23,7 +23,7 @@ class Int8AveragePoolOp final : public ConvPoolOpBase<CPUContext> {
 this->order_ == StorageOrder::NHWC, "Int8 only supports NHWC order.");
 }

-~Int8AveragePoolOp() {
+~Int8AveragePoolOp() override {
 if (this->qnnpackOperator_ != nullptr) {
 qnnp_delete_operator(this->qnnpackOperator_);
 this->qnnpackOperator_ = nullptr;

@@ -25,7 +25,7 @@ class Int8ChannelShuffleOp final : public ConvPoolOpBase<CPUContext> {
 "Int8ChannelShuffleOp only supports NHWC order");
 }

-~Int8ChannelShuffleOp() {
+~Int8ChannelShuffleOp() override {
 if (this->qnnpackOperator_ != nullptr) {
 qnnp_delete_operator(this->qnnpackOperator_);
 this->qnnpackOperator_ = nullptr;

@@ -27,7 +27,7 @@ class Int8ConvOp final : public ConvPoolOpBase<CPUContext> {
 createSharedBuffer<CPUContext>(ws_);
 }

-~Int8ConvOp() {
+~Int8ConvOp() override {
 if (this->qnnpackObject_ != nullptr) {
 qnnp_delete_operator(this->qnnpackObject_);
 this->qnnpackObject_ = nullptr;

@@ -26,7 +26,7 @@ class Int8ConvTransposeOp final : public ConvTransposeUnpoolBase<CPUContext> {
 createSharedBuffer<CPUContext>(ws_);
 }

-~Int8ConvTransposeOp() {
+~Int8ConvTransposeOp() override {
 if (this->qnnpackObject_ != nullptr) {
 qnnp_delete_operator(this->qnnpackObject_);
 this->qnnpackObject_ = nullptr;

@@ -20,7 +20,7 @@ class Int8FCOp final : public Operator<CPUContext> {
 createSharedBuffer<CPUContext>(ws_);
 }

-~Int8FCOp() {
+~Int8FCOp() override {
 if (this->qnnpackObject_ != nullptr) {
 qnnp_delete_operator(this->qnnpackObject_);
 this->qnnpackObject_ = nullptr;
|
@ -25,7 +25,7 @@ class Int8LeakyReluOp final : public Operator<CPUContext> {
|
||||
#endif
|
||||
}
|
||||
|
||||
~Int8LeakyReluOp() {
|
||||
~Int8LeakyReluOp() override {
|
||||
if (this->qnnpackOperator_ != nullptr) {
|
||||
qnnp_delete_operator(this->qnnpackOperator_);
|
||||
this->qnnpackOperator_ = nullptr;
|
||||
|
@ -23,7 +23,7 @@ class Int8MaxPoolOp final : public ConvPoolOpBase<CPUContext> {
|
||||
this->order_ == StorageOrder::NHWC, "Int8 only supports NHWC order.");
|
||||
}
|
||||
|
||||
~Int8MaxPoolOp() {
|
||||
~Int8MaxPoolOp() override {
|
||||
if (this->qnnpackOperator_ != nullptr) {
|
||||
qnnp_delete_operator(this->qnnpackOperator_);
|
||||
this->qnnpackOperator_ = nullptr;
|
||||
|
@ -21,7 +21,7 @@ class Int8ReluOp final : public Operator<CPUContext> {
|
||||
#endif
|
||||
}
|
||||
|
||||
~Int8ReluOp() {
|
||||
~Int8ReluOp() override {
|
||||
if (this->qnnpackOperator_ != nullptr) {
|
||||
qnnp_delete_operator(this->qnnpackOperator_);
|
||||
this->qnnpackOperator_ = nullptr;
|
||||
|
@ -20,7 +20,7 @@ class Int8SigmoidOp final : public Operator<CPUContext> {
|
||||
#endif
|
||||
}
|
||||
|
||||
~Int8SigmoidOp() {
|
||||
~Int8SigmoidOp() override {
|
||||
if (this->qnnpackOperator_ != nullptr) {
|
||||
qnnp_delete_operator(this->qnnpackOperator_);
|
||||
this->qnnpackOperator_ = nullptr;
|
||||
|
@ -21,7 +21,7 @@ class Int8SoftmaxOp final : public Operator<CPUContext> {
|
||||
#endif
|
||||
}
|
||||
|
||||
~Int8SoftmaxOp() {
|
||||
~Int8SoftmaxOp() override {
|
||||
if (this->qnnpackOperator_ != nullptr) {
|
||||
qnnp_delete_operator(this->qnnpackOperator_);
|
||||
this->qnnpackOperator_ = nullptr;
|
||||
|
@ -20,7 +20,7 @@ class MaxReduceDimsOp final : public Operator<Context> {
|
||||
|
||||
USE_OPERATOR_CONTEXT_FUNCTIONS;
|
||||
|
||||
bool RunOnDevice() {
|
||||
bool RunOnDevice() override {
|
||||
auto& X = Input(0);
|
||||
|
||||
CAFFE_ENFORCE(
|
||||
|
@ -27,7 +27,7 @@ class SumElementsOp : public Operator<Context> {
|
||||
explicit SumElementsOp(const c10::FunctionSchema& schema, std::vector<c10::IValue> inputs, std::vector<c10::IValue*> outputs, bool average)
|
||||
: Operator<Context>(schema, std::move(inputs), std::move(outputs)), average_(average) {}
|
||||
#endif
|
||||
~SumElementsOp() {}
|
||||
~SumElementsOp() override {}
|
||||
|
||||
bool RunOnDevice() override {
|
||||
auto& X = Input(0);
|
||||
@ -62,7 +62,7 @@ class SumElementsIntOp : public Operator<Context> {
|
||||
template <class... Args>
|
||||
explicit SumElementsIntOp(Args&&... args)
|
||||
: Operator<Context>(std::forward<Args>(args)...) {}
|
||||
~SumElementsIntOp() {}
|
||||
~SumElementsIntOp() override {}
|
||||
|
||||
bool RunOnDevice() override {
|
||||
auto& X = Input(0);
|
||||
@ -95,7 +95,7 @@ class SumElementsGradientOp : public Operator<Context> {
|
||||
explicit SumElementsGradientOp(const c10::FunctionSchema& schema, std::vector<c10::IValue> inputs, std::vector<c10::IValue*> outputs, bool average)
|
||||
: Operator<Context>(schema, std::move(inputs), std::move(outputs)), average_(average) {}
|
||||
#endif
|
||||
~SumElementsGradientOp() {}
|
||||
~SumElementsGradientOp() override {}
|
||||
|
||||
bool RunOnDevice() override;
|
||||
|
||||
|
@@ -486,7 +486,7 @@ class TORCH_API ThreadedRecurrentNetworkExecutor : public RecurrentNetworkExecut
 : RecurrentNetworkExecutorBase(step_net_def, recurrent_input_map, timestep_blob),
 failed_(false) {}

-~ThreadedRecurrentNetworkExecutor() {
+~ThreadedRecurrentNetworkExecutor() override {
 task_queue_.NoMoreJobs();
 VLOG(1) << "Joining workers.";
 for (auto& worker : workers_) {

@@ -40,7 +40,7 @@ class SpatialBNOp : public Operator<Context> {
 CAFFE_ENFORCE_LE(momentum_, 1);
 }

-virtual ~SpatialBNOp() = default;
+~SpatialBNOp() override = default;

 bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<float>>::call(this, Input(0));

@@ -305,7 +305,7 @@ class SpatialBNGradientOp : public Operator<Context> {
 CAFFE_ENFORCE_EQ(OutputSize(), 3);
 }

-virtual ~SpatialBNGradientOp() = default;
+~SpatialBNGradientOp() override = default;

 bool RunOnDevice() override {
 return DispatchHelper<TensorTypes<float>>::call(this, Input(0));

@@ -37,7 +37,7 @@ class SummarizeOp final : public Operator<Context> {
 log_file_->rdstate());
 }
 }
-~SummarizeOp() {
+~SummarizeOp() override {
 if (to_file_)
 log_file_->close();
 }

@@ -16,7 +16,7 @@ class TensorProtosDBInput final : public PrefetchOperator<Context> {
 using OperatorBase::OutputSize;
 using PrefetchOperator<Context>::prefetch_thread_;
 explicit TensorProtosDBInput(const OperatorDef& operator_def, Workspace* ws);
-~TensorProtosDBInput() {
+~TensorProtosDBInput() override {
 PrefetchOperator<Context>::Finalize();
 }

@@ -107,7 +107,7 @@ class TORCH_API BufferedTokenizer {
 class TORCH_API FileReader : public StringProvider {
 public:
 explicit FileReader(const std::string& path, size_t bufferSize = 65536);
-~FileReader();
+~FileReader() override;
 void operator()(CharRange& range) override;
 void reset() override;

@@ -19,7 +19,7 @@ class TopKOp : public Operator<Context> {
 OP_SINGLE_ARG(int, "axis", axis_, -1) {
 }

-~TopKOp() {}
+~TopKOp() override {}

 bool RunOnDevice() override;

@@ -38,7 +38,7 @@ class TopKGradientOp : public Operator<Context> {
 : Operator<Context>(std::forward<Args>(args)...),
 OP_SINGLE_ARG(int, "axis", axis_, -1) {}

-~TopKGradientOp() {}
+~TopKGradientOp() override {}

 bool RunOnDevice() override;
@@ -25,7 +25,7 @@ class TTLinearOp final : public Operator<Context> {
 out_sizes_(this->template GetRepeatedArgument<int>("out_sizes")),
 tt_ranks_(this->template GetRepeatedArgument<int>("tt_ranks")),
 Y_temp_(unique_ptr<Blob>(new Blob())) {}
-~TTLinearOp() {}
+~TTLinearOp() override {}

 bool RunOnDevice() override {
 const auto& X = Input(0); // Input array

@@ -180,7 +180,7 @@ class TTLinearGradientOp : public Operator<Context> {
 template <class... Args>
 explicit TTLinearGradientOp(Args&&... args)
 : Operator<Context>(std::forward<Args>(args)...) {}
-~TTLinearGradientOp() {}
+~TTLinearGradientOp() override {}

 bool RunOnDevice() override {
 return false;

@@ -648,7 +648,7 @@ template <class Context>
 class ScatterAssignOp : public Operator<Context> {
 public:
 USE_OPERATOR_CONTEXT_FUNCTIONS;
-virtual ~ScatterAssignOp() {}
+~ScatterAssignOp() override {}

 template <class... Args>
 explicit ScatterAssignOp(Args&&... args)

@@ -173,8 +173,8 @@ class ConvTransposeConverter : public Converter {
 }
 // Does not override default converter to OperatorDef

-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~ConvTransposeConverter() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+~ConvTransposeConverter() override {}
 };

 REGISTER_CONVERTER(Conv, ConvConverter);

@@ -70,7 +70,7 @@ C10_DECLARE_REGISTRY(ConverterRegistry, Converter);
 const OperatorDef& op) override { \
 return std::make_unique<nom::repr::opName>(); \
 } \
-virtual ~opName##Converter() {} \
+~opName##Converter() override {} \
 };

 } // namespace caffe2

@@ -181,7 +181,7 @@ class OnnxifiOp final : public Operator<Context> {
 buildBackendAndGraph(ws, property_pointers, onnx_model_str);
 }

-~OnnxifiOp() {
+~OnnxifiOp() override {
 backend_graph_shared_ptr_.reset();
 backend_graph_map_ptr_->remove(op_id_string_);
 #ifdef ONNXIFI_ENABLE_EXT
@@ -14,7 +14,7 @@ namespace caffe2 {
 class OutputMinMaxObserver final : public ObserverBase<OperatorBase> {
 public:
 explicit OutputMinMaxObserver(OperatorBase* op);
-~OutputMinMaxObserver();
+~OutputMinMaxObserver() override;

 struct TensorInfo {
 explicit TensorInfo(const std::string& name)

@@ -65,7 +65,7 @@ class OutputMinMaxNetObserver final : public NetObserver {
 const std::string& out_file_name,
 int dump_freq = -1,
 string delimiter = " ");
-~OutputMinMaxNetObserver();
+~OutputMinMaxNetObserver() override;

 private:
 void Stop() override;

@@ -140,7 +140,7 @@ class HistogramNetObserver final : public NetObserver {
 bool mul_nets = false,
 string op_filter = "",
 string delimiter = " ");
-~HistogramNetObserver();
+~HistogramNetObserver() override;
 void DumpHistogramFile() {
 DumpAndReset_(out_file_name_, false);
 }

@@ -174,7 +174,7 @@ class OutputColumnMaxHistogramNetObserver final : public NetObserver {
 int dump_freq = -1,
 bool mul_nets = false,
 string delimiter = " ");
-~OutputColumnMaxHistogramNetObserver();
+~OutputColumnMaxHistogramNetObserver() override;
 void DumpOutputColumnMaxHistogramFile() {
 DumpAndReset_(out_file_name_, false);
 }

@@ -19,7 +19,7 @@ class ConvDNNLowPOp : public ConvPoolDNNLowPOpBase<T, ConvFp32Op> {
 USE_CONV_POOL_BASE_FUNCTIONS(CPUContext);
 USE_CONV_POOL_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, ConvFp32Op);
 ConvDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
-virtual ~ConvDNNLowPOp();
+~ConvDNNLowPOp() override;

 protected:
 bool RunOnDeviceWithOrderNCHW() override;
@@ -20,7 +20,7 @@ class ConvReluOp final : public ConvPoolOpBase<Context> {
 TORCH_CHECK_NOTNULL(local_output_blobs_.back());
 }
 }
-~ConvReluOp() {}
+~ConvReluOp() override {}

 bool RunOnDeviceWithOrderNCHW() override;
 bool RunOnDeviceWithOrderNHWC() override;

@@ -70,7 +70,7 @@ class FbFCPackedOperator final : public Operator<Context> {
 : Operator<Context>(operator_def, ws),
 axis_(this->template GetSingleArgument<int32_t>("axis", 1)),
 axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)) {}
-~FbFCPackedOperator() {}
+~FbFCPackedOperator() override {}

 // template on X, B, and Y.
 template <typename T_X, typename T_B, typename T_Y>

@@ -37,7 +37,7 @@ class FbGemmPackOp final : public Operator<Context> {
 axis_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
 no_packing_(
 this->template GetSingleArgument<int32_t>("no_packing", 0)) {}
-~FbGemmPackOp() {}
+~FbGemmPackOp() override {}

 bool RunOnDevice() override {
 const auto& X = Input(0);

@@ -58,7 +58,7 @@ class FullyConnectedFakeLowpFPOp final : public Operator<Context> {
 axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
 float16_compute_(
 this->template GetSingleArgument<bool>("float16_compute", false)) {}
-~FullyConnectedFakeLowpFPOp() {}
+~FullyConnectedFakeLowpFPOp() override {}

 template <
 typename T_X,

@@ -104,7 +104,7 @@ class FullyConnectedGradientFakeLowpFPOp : public Operator<Context> {
 axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
 float16_compute_(
 this->template GetSingleArgument<bool>("float16_compute", false)) {}
-~FullyConnectedGradientFakeLowpFPOp() {}
+~FullyConnectedGradientFakeLowpFPOp() override {}

 template <
 typename T_X,

@@ -14,7 +14,7 @@ class LSTMUnitDNNLowPOp final : public LSTMUnitOp<CPUContext> {

 public:
 LSTMUnitDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
-~LSTMUnitDNNLowPOp();
+~LSTMUnitDNNLowPOp() override;
 bool RunOnDevice() override;

 private:

@@ -30,7 +30,7 @@ class GatherDNNLowPOp final : public GatherOp<CPUContext> {

 public:
 GatherDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
-~GatherDNNLowPOp();
+~GatherDNNLowPOp() override;
 bool RunOnDevice() override;

 template <typename Index>
@@ -42,7 +42,7 @@ class BlobsQueueDBCursor : public Cursor {
 CAFFE_ENFORCE(value_blob_index_ >= 0, "value_blob_index < 0");
 }

-virtual ~BlobsQueueDBCursor() {}
+~BlobsQueueDBCursor() override {}

 void Seek(const string& /* unused */) override {
 CAFFE_THROW("Seek is not supported.");

@@ -121,7 +121,7 @@ class BlobsQueueDB : public DB {
 LOG(INFO) << "BlobsQueueDB constructed";
 }

-virtual ~BlobsQueueDB() {
+~BlobsQueueDB() override {
 Close();
 }

@@ -616,8 +616,7 @@ struct UnCopyableDataset : public datasets::Dataset<UnCopyableDataset> {
 UnCopyableDataset(UnCopyableDataset&&) = default;
 UnCopyableDataset& operator=(UnCopyableDataset&&) = default;

-// NOLINTNEXTLINE(modernize-use-override)
-~UnCopyableDataset() = default;
+~UnCopyableDataset() override = default;

 Example<> get(size_t index) override {
 return {

@@ -68,8 +68,7 @@ class BackendWithCompiler : public PyTorchBackendInterface {
 // Constructor.
 // NOLINTNEXTLINE(modernize-use-equals-default)
 explicit BackendWithCompiler() {}
-// NOLINTNEXTLINE(modernize-use-override)
-virtual ~BackendWithCompiler() = default;
+virtual ~BackendWithCompiler() override = default;

 bool is_available() override {
 return true;

@@ -14,8 +14,7 @@ class TestBackend : public PyTorchBackendInterface {
 // Constructor.
 // NOLINTNEXTLINE(modernize-use-equals-default)
 explicit TestBackend() {}
-// NOLINTNEXTLINE(modernize-use-override)
-virtual ~TestBackend() = default;
+virtual ~TestBackend() override = default;

 bool is_available() override {
 return isAvailable;
@@ -1335,8 +1335,8 @@ class TestThreadLocalDebugInfo : public c10::DebugInfoBase {
 model_id_ = model_id;
 }

-// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
-virtual ~TestThreadLocalDebugInfo() {}
+// NOLINTNEXTLINE(modernize-use-equals-default)
+virtual ~TestThreadLocalDebugInfo() override {}

 private:
 int model_id_ = 0;

@@ -118,7 +118,7 @@ class TORCH_API ProcessGroupMPI : public Backend {
 const c10::optional<std::vector<at::Tensor>>& inputTensors =
 c10::nullopt);

-virtual ~AsyncWork();
+~AsyncWork() override;

 bool isCompleted() override;

@@ -144,7 +144,7 @@ class TORCH_API ProcessGroupMPI : public Backend {
 // Constructor will spawn up the worker thread loop
 explicit ProcessGroupMPI(int rank, int size, MPI_Comm pgComm);

-virtual ~ProcessGroupMPI();
+~ProcessGroupMPI() override;

 // Abort the MPI program, needs to be called when exception is detected
 void abort();

@@ -27,8 +27,7 @@ class XNNPackBackend : public PyTorchBackendInterface {
 // Constructor.
 // NOLINTNEXTLINE(modernize-use-equals-default)
 explicit XNNPackBackend() {}
-// NOLINTNEXTLINE(modernize-use-override)
-virtual ~XNNPackBackend() = default;
+virtual ~XNNPackBackend() override = default;

 bool is_available() override {
 return xnn_status_success == xnn_initialize(/*allocator=*/nullptr);