From 9e7f22b223b6a9226adeab6d08484710676d4a6b Mon Sep 17 00:00:00 2001 From: Sebastian Messmer Date: Tue, 14 May 2019 14:32:12 -0700 Subject: [PATCH] Remove dependencies from Caffe2Go on PyTorch JIT (#20463) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/20463 Source file changes mostly involve ifdef'ing-out references to JIT code from files that are part of Caffe2Go. Update Internal build scripts to remove those files from our globs. After this, changes to most of the JIT files should not trigger mobile CI. Reviewed By: dzhulgakov Differential Revision: D15329407 fbshipit-source-id: 48f614c6b028eef0a03ce5161d083a3e078b0412 --- aten/src/ATen/core/aten_interned_strings.h | 2 - aten/src/ATen/core/interned_strings.h | 5 ++- caffe2/core/c10_operator.h | 4 +- caffe2/core/operator.cc | 10 ++++- caffe2/core/operator.h | 50 ++++++++++++++++++++++ caffe2/core/operator_c10wrapper.h | 6 +-- 6 files changed, 67 insertions(+), 10 deletions(-) diff --git a/aten/src/ATen/core/aten_interned_strings.h b/aten/src/ATen/core/aten_interned_strings.h index cec23de35dc7..b6b10ab13e1d 100644 --- a/aten/src/ATen/core/aten_interned_strings.h +++ b/aten/src/ATen/core/aten_interned_strings.h @@ -8,7 +8,6 @@ // To explicitly use interned strings as symbols in your code, you must add // them to this list. -#if !defined(C10_MOBILE) || defined(FEATURE_TORCH_MOBILE) #define FORALL_ATEN_BASE_SYMBOLS(_) \ _(aten, __and__) \ _(aten, __iand__) \ @@ -1013,4 +1012,3 @@ _(attr, workspace) \ _(attr, x) \ _(attr, x1) \ _(attr, x2) -#endif diff --git a/aten/src/ATen/core/interned_strings.h b/aten/src/ATen/core/interned_strings.h index 9939dbf42295..bb4622b72f31 100644 --- a/aten/src/ATen/core/interned_strings.h +++ b/aten/src/ATen/core/interned_strings.h @@ -5,9 +5,12 @@ #include #include -#include #include +#if !defined(C10_MOBILE) || defined(FEATURE_TORCH_MOBILE) +#include +#endif + namespace c10 { #if !defined(C10_MOBILE) || defined(FEATURE_TORCH_MOBILE) diff --git a/caffe2/core/c10_operator.h b/caffe2/core/c10_operator.h index a82d0d8402d6..58be92d57fd2 100644 --- a/caffe2/core/c10_operator.h +++ b/caffe2/core/c10_operator.h @@ -1,10 +1,9 @@ #pragma once +#if !defined(CAFFE2_IS_XPLAT_BUILD) #include #include -#if !defined(CAFFE2_IS_XPLAT_BUILD) #include -#endif #include namespace caffe2 { @@ -156,7 +155,6 @@ inline std::unique_ptr noCache() { * - If your operator has a variable number of input tensors, make the first (!) * input an input of type TensorList. There must be no other tensor inputs. */ -#if !defined(CAFFE2_IS_XPLAT_BUILD) #define C10_DECLARE_CAFFE2_OPERATOR(OperatorName) \ namespace caffe2 { \ namespace _c10_ops { \ diff --git a/caffe2/core/operator.cc b/caffe2/core/operator.cc index 4bd3e1a715ef..db33d8a41f32 100644 --- a/caffe2/core/operator.cc +++ b/caffe2/core/operator.cc @@ -63,8 +63,11 @@ OperatorBase::OperatorBase(const OperatorDef& operator_def, Workspace* ws) type_ = operator_def.type(); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) namespace { -int compute_input_size_(const std::vector& inputs) { +int +C10_UNUSED // Suppress unused function warning on mobile. 
+compute_input_size_(const std::vector& inputs) { if (inputs.empty()) { return 0; } @@ -103,6 +106,7 @@ OperatorBase::OperatorBase( input_tensors_.resize(input_size_); output_tensors_.resize(newstyle_outputs_.size()); } +#endif vector OperatorBase::InputTensorShapes() const { vector tps; @@ -737,7 +741,11 @@ std::function GetOperatorLogger() { c10::optional OperatorBase::argumentIndexWithName( const std::string& name) const { +#if !defined(CAFFE2_IS_XPLAT_BUILD) return getFunctionSchema().argumentIndexWithName(name); +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } OperatorBase::~OperatorBase() noexcept = default; diff --git a/caffe2/core/operator.h b/caffe2/core/operator.h index 9e4c5952a82f..f2c36d0cc141 100644 --- a/caffe2/core/operator.h +++ b/caffe2/core/operator.h @@ -26,7 +26,9 @@ #include "caffe2/utils/proto_utils.h" #include +#if !defined(CAFFE2_IS_XPLAT_BUILD) #include +#endif C10_DECLARE_bool(caffe2_operator_throw_if_fp_exceptions); @@ -50,10 +52,12 @@ class CAFFE2_API OperatorBase : public Observable { * Alternatively, inputs can be one tensor list ivalue followed by non-tensors * to represent operators with a variable number of inputs. */ +#if !defined(CAFFE2_IS_XPLAT_BUILD) explicit OperatorBase( const c10::FunctionSchema& schema, std::vector inputs, std::vector outputs); +#endif virtual ~OperatorBase() noexcept; @@ -61,12 +65,20 @@ class CAFFE2_API OperatorBase : public Observable { * New operators should be instantiated with FunctionSchema */ bool isLegacyOperator() const { +#if !defined(CAFFE2_IS_XPLAT_BUILD) return !fn_schema_; +#else + return true; +#endif } const c10::FunctionSchema& getFunctionSchema() const { CAFFE_ENFORCE(!isLegacyOperator()); +#if !defined(CAFFE2_IS_XPLAT_BUILD) return *fn_schema_.get(); +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } /** @brief Checks if the operator has an argument of the given name. @@ -88,10 +100,14 @@ class CAFFE2_API OperatorBase : public Observable { return ArgumentHelper::GetSingleArgument( *operator_def_, name, default_value); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) auto index = argumentIndexWithName(name); CAFFE_ENFORCE(index.has_value(), "Couldn't get index for argument!", name); const auto& value = newstyle_inputs_[index.value()]; return value.template to(); +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } template @@ -100,10 +116,12 @@ class CAFFE2_API OperatorBase : public Observable { return ArgumentHelper::HasSingleArgumentOfType( *operator_def_, name); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) template inline vector GetVectorFromIValueList(const c10::IValue& value) const { return value.template to>(); } +#endif template inline vector GetRepeatedArgument( @@ -114,10 +132,14 @@ class CAFFE2_API OperatorBase : public Observable { return ArgumentHelper::GetRepeatedArgument( *operator_def_, name, default_value); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) auto index = argumentIndexWithName(name); CAFFE_ENFORCE(index.has_value(), "Couldn't get index for argument!", name); const auto& value = newstyle_inputs_[index.value()]; return GetVectorFromIValueList(value); +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } // Get the inputs and outputs as specific types. 
@@ -165,6 +187,7 @@ class CAFFE2_API OperatorBase : public Observable { throw enf; } } +#if !defined(CAFFE2_IS_XPLAT_BUILD) DCHECK_LT(0, newstyle_inputs_.size()); IValue ival; if (newstyle_inputs_[0].isTensorList()) { @@ -186,6 +209,9 @@ class CAFFE2_API OperatorBase : public Observable { CAFFE_ENFORCE_EQ(tensor.GetDeviceType(), type); input_tensors_[idx] = std::move(tensor); return input_tensors_[idx]; +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } template @@ -207,6 +233,7 @@ class CAFFE2_API OperatorBase : public Observable { // When you get a Tensor here it is not fully initialized return BlobGetMutableTensor(outputs_.at(idx), type); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) auto& output = newstyle_outputs_[idx]; Tensor tensor = caffe2::Tensor(output); if (!tensor.defined() || tensor.GetDeviceType() != type) { @@ -216,6 +243,9 @@ class CAFFE2_API OperatorBase : public Observable { } output_tensors_[idx] = caffe2::Tensor(output); return &output_tensors_[idx]; +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } inline Tensor @@ -232,10 +262,14 @@ class CAFFE2_API OperatorBase : public Observable { void SetOutputTensor(int idx, Tensor tensor) { if (!isLegacyOperator()) { +#if !defined(CAFFE2_IS_XPLAT_BUILD) newstyle_outputs_[idx] = at::Tensor(tensor); // also update the tensor in the hack output_tensors_[idx] = std::move(tensor); +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } else { // update the tensor in the workspace BlobSetTensor(outputs_.at(idx), std::move(tensor)); @@ -257,6 +291,7 @@ class CAFFE2_API OperatorBase : public Observable { "device must be provided in options."); return BlobGetMutableTensor(outputs_.at(idx), dims, options); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) auto& output = newstyle_outputs_[idx]; Tensor tensor = GetSizedTensorWithOptions(caffe2::Tensor(output), dims, options); @@ -265,6 +300,9 @@ class CAFFE2_API OperatorBase : public Observable { output_tensors_[idx] = caffe2::Tensor(output); return &output_tensors_[idx]; +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } // Get output Tensor of the operator and CopyFrom the given Tensor @@ -349,7 +387,11 @@ class CAFFE2_API OperatorBase : public Observable { if (isLegacyOperator()) { return outputs_.size(); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) return newstyle_outputs_.size(); +#else + CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2"); +#endif } inline const vector& Inputs() const { return inputs_; } inline const vector& Outputs() { return outputs_; } @@ -540,9 +582,11 @@ class CAFFE2_API OperatorBase : public Observable { return helper_; } +#if !defined(CAFFE2_IS_XPLAT_BUILD) std::vector move_newstyle_outputs() && { return std::move(newstyle_outputs_); } +#endif public: static const int kNoNetPositionSet = -1; @@ -556,9 +600,11 @@ class CAFFE2_API OperatorBase : public Observable { vector inputs_; vector outputs_; // Preferrably use c10::optional, but nvcc doesn't work +#if !defined(CAFFE2_IS_XPLAT_BUILD) std::unique_ptr fn_schema_; vector newstyle_inputs_; vector newstyle_outputs_; +#endif // HACK // We preserve the fact that Output() returns Tensor* // by storing Tensor in a vector owned by the @@ -618,6 +664,7 @@ inline NetDef OperatorBase::GetSingleArgument( return NetDef(); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) template <> inline vector OperatorBase::GetVectorFromIValueList( const c10::IValue& value) const { @@ -649,6 +696,7 @@ inline vector 
OperatorBase::GetVectorFromIValueList( vector out; return out; } +#endif // OP_SINGLE_ARG provides a shorter initialization choice for initialization of // member variables for the class constructors. @@ -688,6 +736,7 @@ class Operator : public OperatorBase { // constructors will run on that device. context_.SwitchToDevice(); } +#if !defined(CAFFE2_IS_XPLAT_BUILD) explicit Operator( const c10::FunctionSchema& fn_schema, std::vector inputs, @@ -697,6 +746,7 @@ class Operator : public OperatorBase { // constructors will run on that device. context_.SwitchToDevice(); } +#endif ~Operator() noexcept override {} /// Retrieve a non-owning reference to the input at position 'idx' for this diff --git a/caffe2/core/operator_c10wrapper.h b/caffe2/core/operator_c10wrapper.h index d18d1fcb1858..d505e6d90163 100644 --- a/caffe2/core/operator_c10wrapper.h +++ b/caffe2/core/operator_c10wrapper.h @@ -1,5 +1,7 @@ #pragma once +// TODO Also register c10 operators on mobile +#if !defined(CAFFE2_IS_XPLAT_BUILD) #include #include #include @@ -225,9 +227,8 @@ createC10OperatorWrapper(const char* op_name, const char* overload_name) { } } // namespace detail +} // namespace caffe2 -// TODO Also register c10 operators on mobile -#if !defined(CAFFE2_IS_XPLAT_BUILD) // TODO Currently we only register the CPU variant. This is going to be fixed // once the tensor detemplatization lands. #define REGISTER_C10_OPERATOR_FOR_CAFFE2_DISPATCH_CPU( \ @@ -256,4 +257,3 @@ createC10OperatorWrapper(const char* op_name, const char* overload_name) { #define REGISTER_C10_OPERATOR_FOR_CAFFE2_DISPATCH_HIP( \ OperatorName, Name) #endif -} // namespace caffe2
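
The pattern repeated throughout the diff, shown once as a minimal standalone sketch. ExampleOp, schemaName() and the sketch as a whole are hypothetical stand-ins for the OperatorBase members touched above, not code from the patch; only the macros (CAFFE2_IS_XPLAT_BUILD, CAFFE_ENFORCE, CAFFE_THROW), the fn_schema_ member and c10::FunctionSchema come from the code base itself:

#include <memory>
#include <string>

#include "caffe2/core/logging.h"        // CAFFE_ENFORCE, CAFFE_THROW
#if !defined(CAFFE2_IS_XPLAT_BUILD)
#include <ATen/core/function_schema.h>  // JIT-adjacent header, desktop builds only
#endif

namespace caffe2 {

class ExampleOp {
 public:
  bool isLegacyOperator() const {
#if !defined(CAFFE2_IS_XPLAT_BUILD)
    return !fn_schema_;  // no schema means the op was built from an OperatorDef
#else
    return true;         // mobile builds only ever see legacy (OperatorDef) ops
#endif
  }

  std::string schemaName() const {
#if !defined(CAFFE2_IS_XPLAT_BUILD)
    CAFFE_ENFORCE(!isLegacyOperator());
    return fn_schema_->name();
#else
    CAFFE_THROW("Non-legacy operators are not legal in xplat/caffe2");
#endif
  }

 private:
#if !defined(CAFFE2_IS_XPLAT_BUILD)
  std::unique_ptr<c10::FunctionSchema> fn_schema_;  // field does not exist on mobile
#endif
};

} // namespace caffe2

Legacy (OperatorDef-based) operators keep working on mobile because they never consult fn_schema_; only the FunctionSchema-based entry points are compiled out or reduced to a CAFFE_THROW stub, which is why changes to most JIT files should no longer trigger mobile CI.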