[codemod] Fix some namespace issues in caffe2 (#121847)

Summary:
Removes `using namespace` from a header file. Having `using namespace` in a header file is *always* a bad idea. A previous raft of diffs provided appropriate qualifications to everything that relied on this `using namespace`, so it is now safe to remove it in this separate diff.

Helps us enable `-Wheader-hygiene`.
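
For readers less familiar with the issue: a using-directive in a header is injected into every translation unit that includes that header, which is exactly the pattern Clang's `-Wheader-hygiene` warns about. A minimal sketch of the before/after shape of this change, using hypothetical file and function names rather than anything from the patch:

```cpp
// widget_bad.h (hypothetical) -- the directive leaks into every includer.
#pragma once
#include <vector>
using namespace std;  // every .cc that includes this header now sees all of std::
inline vector<int> default_sizes() { return {1, 2, 3}; }

// widget.h (hypothetical, fixed) -- qualify names in the header instead.
#pragma once
#include <vector>
inline std::vector<int> default_sizes() { return {1, 2, 3}; }

// user.cc (hypothetical) -- a source file can still opt in to the shorthand,
// since the directive is then confined to that one translation unit.
#include "widget.h"
using namespace std;
```

That is the shape of the diff below: the directive is dropped from the header, uses are spelled out as `caffe2::`, `nom::repr::`, or `std::`, and one source file adds a local using-directive where that is still convenient.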

Test Plan: Sandcastle

Reviewed By: dmm-fb

Differential Revision: D54838298

Pull Request resolved: https://github.com/pytorch/pytorch/pull/121847
Approved by: https://github.com/Skylion007
Author: Richard Barnes
Date: 2024-04-01 17:45:16 +00:00
Committed by: PyTorch MergeBot
Parent: 533c1b6c49
Commit: c422bce131
5 changed files with 35 additions and 39 deletions

File 1 of 5:

@@ -3,21 +3,19 @@
 #include "caffe2/ideep/ideep_utils.h"
 #include "caffe2/proto/caffe2_legacy.pb.h"
-using namespace caffe2;
 namespace {
-class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
+class IDEEPConvTransposeUnpoolBase : public caffe2::IDEEPOperator {
  public:
   USE_IDEEP_DEF_ALIASES();
   USE_IDEEP_OPERATOR_FUNCTIONS();
-  IDEEPConvTransposeUnpoolBase(const OperatorDef& operator_def, Workspace* ws)
+  IDEEPConvTransposeUnpoolBase(const caffe2::OperatorDef& operator_def, caffe2::Workspace* ws)
       : IDEEPOperator(operator_def, ws),
         legacy_pad_(
-            static_cast<LegacyPadding>(OperatorBase::GetSingleArgument<int>(
+            static_cast<caffe2::LegacyPadding>(OperatorBase::GetSingleArgument<int>(
                 "legacy_pad",
-                LegacyPadding::NOTSET))),
+                caffe2::LegacyPadding::NOTSET))),
         kernel_(OperatorBase::GetRepeatedArgument<int>("kernels")),
         stride_(OperatorBase::GetRepeatedArgument<int>("strides")),
         pads_(OperatorBase::GetRepeatedArgument<int>("pads")),
@@ -26,8 +24,8 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
             OperatorBase::GetSingleArgument<int>("shared_buffer", 0)) {
     // For the padding, they should either be the legacy padding strategy
     // (VALID or SAME), or an explicit, non-negative value.
-    if (legacy_pad_ == LegacyPadding::VALID ||
-        legacy_pad_ == LegacyPadding::SAME) {
+    if (legacy_pad_ == caffe2::LegacyPadding::VALID ||
+        legacy_pad_ == caffe2::LegacyPadding::SAME) {
       CAFFE_ENFORCE(
           !OperatorBase::HasArgument("pads"),
           "If you use legacy padding VALID or SAME, you should not specify "
@@ -63,8 +61,8 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
     if (OperatorBase::HasArgument("pad")) {
       CAFFE_ENFORCE(
-          legacy_pad_ != LegacyPadding::VALID &&
-              legacy_pad_ != LegacyPadding::SAME,
+          legacy_pad_ != caffe2::LegacyPadding::VALID &&
+              legacy_pad_ != caffe2::LegacyPadding::SAME,
           "If you use legacy padding VALID or SAME, you should not specify "
           "any specific padding values.");
       pads_.resize(4, OperatorBase::GetSingleArgument<int>("pad", 0));
@@ -74,8 +72,8 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
         OperatorBase::HasArgument("pad_b") &&
         OperatorBase::HasArgument("pad_r")) {
       CAFFE_ENFORCE(
-          legacy_pad_ != LegacyPadding::VALID &&
-              legacy_pad_ != LegacyPadding::SAME,
+          legacy_pad_ != caffe2::LegacyPadding::VALID &&
+              legacy_pad_ != caffe2::LegacyPadding::SAME,
           "If you use legacy padding VALID or SAME, you should not specify "
           "any specific padding values.");
       pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_t", 0));
@@ -104,8 +102,8 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
     CAFFE_ENFORCE_EQ(stride_.size(), kernel_.size());
     CAFFE_ENFORCE_EQ(adj_.size(), kernel_.size());
-    if (legacy_pad_ != LegacyPadding::VALID &&
-        legacy_pad_ != LegacyPadding::SAME) {
+    if (legacy_pad_ != caffe2::LegacyPadding::VALID &&
+        legacy_pad_ != caffe2::LegacyPadding::SAME) {
       CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());
     }
@@ -174,13 +172,13 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
   }
  private:
-  LegacyPadding legacy_pad_;
+  caffe2::LegacyPadding legacy_pad_;
 protected:
-  vector<int> kernel_;
-  vector<int> stride_;
-  vector<int> pads_;
-  vector<int> adj_;
+  std::vector<int> kernel_;
+  std::vector<int> stride_;
+  std::vector<int> pads_;
+  std::vector<int> adj_;
   bool shared_buffer_;
   // Accessors for 2D conv params.
@@ -234,7 +232,7 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
       int* pad_tail,
       int* out_size) {
     switch (legacy_pad_) {
-      case LegacyPadding::NOTSET:
+      case caffe2::LegacyPadding::NOTSET:
         CAFFE_ENFORCE_GE(*pad_head, 0);
         CAFFE_ENFORCE_GE(*pad_tail, 0);
         *out_size =
@@ -242,13 +240,13 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
         break;
       // We handle cases of LegacyPadding::VALID and LegacyPadding::SAME
       // the same way
-      case LegacyPadding::VALID:
-      case LegacyPadding::SAME:
+      case caffe2::LegacyPadding::VALID:
+      case caffe2::LegacyPadding::SAME:
         *pad_head = 0;
         *pad_tail = 0;
         *out_size = (in_size - 1) * stride + kernel + adj;
         break;
-      case LegacyPadding::CAFFE_LEGACY_POOLING:
+      case caffe2::LegacyPadding::CAFFE_LEGACY_POOLING:
         LOG(FATAL) << "CAFFE_LEGACY_POOLING is no longer supported.";
         break;
     }

File 2 of 5:

@@ -6,6 +6,8 @@
 #include "nomnigraph/Support/Common.h"
 #include "nomnigraph/Transformations/SubgraphMatcher.h"
+using namespace nom::repr;
 namespace caffe2 {
 namespace opt {

File 3 of 5:

@@ -3,8 +3,7 @@
 #include "caffe2/opt/converter.h"
 #include "caffe2/opt/passes.h"
-namespace caffe2 {
-namespace opt {
+namespace caffe2::opt {
 using namespace nom;
@@ -33,7 +32,7 @@ bool fuseConvBNHelper(repr::NNModule* nn, caffe2::Workspace* ws) {
     auto bnNode = consumer;
     auto bn = repr::nn::get<repr::BatchNormalization>(bnNode);
-    auto bnOutputs = nn::getOutputs(bnNode);
+    auto bnOutputs = repr::nn::getOutputs(bnNode);
     NOM_REQUIRE_OR_CONT(bnOutputs.size() == 1);
     auto bnOutput = bnOutputs.front();
@@ -124,5 +123,4 @@ void fuseConvBN(nom::repr::NNModule* nn, caffe2::Workspace* ws) {
 REGISTER_WS_OPT_PASS_FROM_FUNC(FuseConvBN, fuseConvBN);
-} // namespace opt
-} // namespace caffe2
+} // namespace caffe2::opt

File 4 of 5:

@@ -5,8 +5,8 @@ namespace caffe2 {
 C10_DEFINE_REGISTRY(
     WorkspaceOptimizationPassRegistry,
     WorkspaceOptimizationPass,
-    NNModule*,
+    nom::repr::NNModule*,
     Workspace*);
-C10_DEFINE_REGISTRY(OptimizationPassRegistry, OptimizationPass, NNModule*);
+C10_DEFINE_REGISTRY(OptimizationPassRegistry, OptimizationPass, nom::repr::NNModule*);
 } // namespace caffe2

File 5 of 5:

@@ -7,8 +7,6 @@
 #include "nomnigraph/Representations/NeuralNet.h"
-using namespace nom::repr;
 namespace caffe2 {
 /* This file sets up the optimization pass registry.
@@ -23,18 +21,18 @@ namespace caffe2 {
 class TORCH_API OptimizationPass {
  public:
-  OptimizationPass(NNModule* nn) : nn_(nn) {}
+  OptimizationPass(nom::repr::NNModule* nn) : nn_(nn) {}
   virtual void run() = 0;
-  virtual ~OptimizationPass() {}
+  virtual ~OptimizationPass() = default;
  protected:
-  NNModule* nn_;
+  nom::repr::NNModule* nn_;
 };
 class TORCH_API WorkspaceOptimizationPass : public OptimizationPass {
  public:
-  WorkspaceOptimizationPass(NNModule* nn, Workspace* ws) : OptimizationPass(nn), ws_(ws) {}
-  virtual ~WorkspaceOptimizationPass() {}
+  WorkspaceOptimizationPass(nom::repr::NNModule* nn, Workspace* ws) : OptimizationPass(nn), ws_(ws) {}
+  virtual ~WorkspaceOptimizationPass() = default;
  protected:
   Workspace* ws_;
@@ -43,7 +41,7 @@ class TORCH_API WorkspaceOptimizationPass : public OptimizationPass {
 C10_DECLARE_REGISTRY(
     WorkspaceOptimizationPassRegistry,
     WorkspaceOptimizationPass,
-    NNModule*,
+    nom::repr::NNModule*,
     Workspace*);
 #define REGISTER_WS_OPT_PASS(clsname) \
   C10_REGISTER_CLASS(WorkspaceOptimizationPassRegistry, clsname, clsname)
@@ -57,7 +55,7 @@ C10_DECLARE_REGISTRY(
   }; \
   REGISTER_WS_OPT_PASS(passname);
-C10_DECLARE_REGISTRY(OptimizationPassRegistry, OptimizationPass, NNModule*);
+C10_DECLARE_REGISTRY(OptimizationPassRegistry, OptimizationPass, nom::repr::NNModule*);
 #define REGISTER_OPT_PASS(clsname) \
   C10_REGISTER_CLASS(OptimizationPassRegistry, clsname, clsname)
 #define REGISTER_OPT_PASS_FROM_FUNC(passname, funcname) \
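
For context on the registry declared in the last file, here is a hedged sketch of how a pass plugs into it after this change. The class name `MyExamplePass` is invented for illustration; the interface and macros (`OptimizationPass`, `REGISTER_OPT_PASS`, the `caffe2/opt/passes.h` include) are taken from the hunks above.

```cpp
#include "caffe2/opt/passes.h"

namespace caffe2 {

// Hypothetical pass built on the OptimizationPass interface shown above.
// After this commit the base class stores a fully qualified nom::repr::NNModule*.
class MyExamplePass : public OptimizationPass {
 public:
  using OptimizationPass::OptimizationPass;  // inherit OptimizationPass(nom::repr::NNModule*)
  void run() override {
    // Inspect or rewrite *nn_ (the protected nom::repr::NNModule* member) here.
  }
};

// Make the pass discoverable through OptimizationPassRegistry.
REGISTER_OPT_PASS(MyExamplePass);

} // namespace caffe2
```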