From 7c1f62782875a4ac70b96aed0e6c9525b6e4eaf3 Mon Sep 17 00:00:00 2001
From: cyy
Date: Mon, 7 Jul 2025 16:57:43 +0000
Subject: [PATCH] Fix 'dllimport attribute ignored on inline function'
 (#157670)

There are lots of warnings in builds:

```
2025-07-05T16:59:46.9208806Z C:\actions-runner\_work\pytorch\pytorch\build\aten\src\ATen\core\TensorBody.h(5043,29): warning: 'at::Tensor::less_' redeclared inline; 'dllimport' attribute ignored [-Wignored-attributes]
2025-07-05T16:59:46.9209030Z  5043 | inline at::Tensor & Tensor::less_(const at::Scalar & other) const {
2025-07-05T16:59:46.9209104Z       |                             ^
2025-07-05T16:59:46.9209671Z C:\actions-runner\_work\pytorch\pytorch\build\aten\src\ATen\core\TensorBody.h(5048,29): warning: 'at::Tensor::less_' redeclared inline; 'dllimport' attribute ignored [-Wignored-attributes]
2025-07-05T16:59:46.9209860Z  5048 | inline at::Tensor & Tensor::less_(const at::Tensor & other) const
```

This PR fixes these warnings and turns `-Wignored-attributes` into an
error, so that new occurrences fail the build instead of scrolling by
unnoticed.
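For context, the pattern behind the warning looks like this (a minimal
sketch; `EXAMPLE_API` is a hypothetical stand-in for how `TORCH_API` /
`C10_API` expand to `__declspec(dllimport)` in code that consumes the
DLL on Windows):

```
// EXAMPLE_API is a hypothetical stand-in for TORCH_API / C10_API.
#ifdef _WIN32
#define EXAMPLE_API __declspec(dllimport)
#else
#define EXAMPLE_API
#endif

// Before: dllimport promises "the definition lives in the DLL", but
// `inline` places a definition in every translation unit, so clang
// drops the attribute and warns under -Wignored-attributes.
EXAMPLE_API inline bool example_before() {
  return true;
}

// After: an inline function is emitted in every TU that uses it and
// needs no DLL import/export linkage, so the macro is simply removed.
inline bool example_after() {
  return true;
}
```

Exported classes hit the same path: a method declared inside a
dllimport'ed class (such as the generated TensorBody.h methods above)
inherits the attribute, so a later out-of-class `inline` definition
triggers the "redeclared inline" form of the warning.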
Pull Request resolved: https://github.com/pytorch/pytorch/pull/157670
Approved by: https://github.com/albanD
---
 aten/src/ATen/DeviceAccelerator.h             |  2 +-
 aten/src/ATen/FunctionalTensorWrapper.h       |  2 +-
 aten/src/ATen/NamedTensorUtils.h              |  8 ++--
 aten/src/ATen/autocast_mode.h                 | 16 ++++----
 aten/src/ATen/core/jit_type_base.h            |  2 +-
 aten/src/ATen/ops/from_blob.h                 |  2 +-
 aten/src/ATen/templates/Functions.h           |  8 ++--
 c10/core/DispatchKeySet.h                     |  2 +-
 c10/util/BFloat16.h                           |  4 +-
 c10/util/Float8_e4m3fn.h                      |  4 +-
 c10/util/Float8_e4m3fnuz.h                    |  2 +-
 c10/util/Float8_e5m2.h                        |  4 +-
 c10/util/Float8_e5m2fnuz.h                    |  2 +-
 c10/util/Float8_e8m0fnu.h                     |  2 +-
 c10/util/Half.h                               |  2 +-
 c10/util/Logging.h                            |  4 +-
 c10/util/int128.h                             | 40 +++++++++----------
 cmake/public/utils.cmake                      |  1 +
 torch/csrc/autograd/FunctionsManual.h         |  4 +-
 torch/csrc/jit/runtime/static/impl.h          |  6 +--
 torch/csrc/jit/serialization/pickler_helper.h |  2 +-
 torch/csrc/monitor/events.h                   |  2 +-
 torch/csrc/profiler/util.h                    |  2 +-
 23 files changed, 57 insertions(+), 66 deletions(-)

diff --git a/aten/src/ATen/DeviceAccelerator.h b/aten/src/ATen/DeviceAccelerator.h
index 5dd2d2d8b5a3..f37e492c861f 100644
--- a/aten/src/ATen/DeviceAccelerator.h
+++ b/aten/src/ATen/DeviceAccelerator.h
@@ -30,7 +30,7 @@ TORCH_API bool isAccelerator(c10::DeviceType device_type);
 template <
     typename... T,
     typename = std::enable_if_t<(std::is_same_v<T, c10::DeviceType> && ...)>>
-TORCH_API inline bool isAcceleratorExcluded(
+inline bool isAcceleratorExcluded(
     c10::DeviceType device_type,
     c10::DeviceType first_excluded,
     T... rest_excluded) {
diff --git a/aten/src/ATen/FunctionalTensorWrapper.h b/aten/src/ATen/FunctionalTensorWrapper.h
index bec2d463196c..b260b7c9f958 100644
--- a/aten/src/ATen/FunctionalTensorWrapper.h
+++ b/aten/src/ATen/FunctionalTensorWrapper.h
@@ -300,7 +300,7 @@ struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl {
 namespace functionalization {
 namespace impl {
 
-TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
+inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
     const Tensor& tensor) {
   auto functional_impl =
       static_cast<FunctionalTensorWrapper*>(tensor.unsafeGetTensorImpl());
diff --git a/aten/src/ATen/NamedTensorUtils.h b/aten/src/ATen/NamedTensorUtils.h
index 966145ae23fa..c6198dccd243 100644
--- a/aten/src/ATen/NamedTensorUtils.h
+++ b/aten/src/ATen/NamedTensorUtils.h
@@ -167,14 +167,14 @@ TORCH_API TensorImpl* propagate_names(
 TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
 
-TORCH_API inline void propagate_names(
+inline void propagate_names(
     const TensorBase& result,
     DimnameList names,
     bool validate_names = false) {
   propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
 }
 
-TORCH_API inline void propagate_names_if_nonempty(
+inline void propagate_names_if_nonempty(
     const TensorBase& result,
     DimnameList names,
     bool validate_names = false) {
@@ -182,9 +182,7 @@ TORCH_API inline void propagate_names_if_nonempty(
       result.unsafeGetTensorImpl(), names, validate_names);
 }
 
-TORCH_API inline void propagate_names(
-    const TensorBase& result,
-    const TensorBase& src) {
+inline void propagate_names(const TensorBase& result, const TensorBase& src) {
   propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
 }
 
diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h
index 2d9e90a86766..a222b8924bac 100644
--- a/aten/src/ATen/autocast_mode.h
+++ b/aten/src/ATen/autocast_mode.h
@@ -25,7 +25,7 @@ TORCH_API void set_autocast_cache_enabled(bool enabled);
 // deprecated CUDA-specific autocast APIs
 C10_DEPRECATED_MESSAGE(
     "at::autocast::is_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
-TORCH_API inline bool is_enabled() {
+inline bool is_enabled() {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -34,7 +34,7 @@ TORCH_API inline bool is_enabled() {
 }
 C10_DEPRECATED_MESSAGE(
     "at::autocast::set_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
-TORCH_API inline void set_enabled(bool enabled) {
+inline void set_enabled(bool enabled) {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -43,7 +43,7 @@ TORCH_API inline void set_enabled(bool enabled) {
 }
 C10_DEPRECATED_MESSAGE(
     "at::autocast::get_autocast_gpu_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
-TORCH_API inline at::ScalarType get_autocast_gpu_dtype() {
+inline at::ScalarType get_autocast_gpu_dtype() {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -52,7 +52,7 @@ TORCH_API inline at::ScalarType get_autocast_gpu_dtype() {
 }
 C10_DEPRECATED_MESSAGE(
     "at::autocast::set_autocast_gpu_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")
-TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
+inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -65,7 +65,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
       "at::autocast::is_" #name                                              \
       "_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(" #device_type \
       ") instead.")                                                          \
-  TORCH_API inline bool is_##name##_enabled() {                              \
+  inline bool is_##name##_enabled() {                                        \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
         __func__,                                                            \
@@ -78,7 +78,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
       "at::autocast::set_" #name                                             \
       "_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(" #device_type \
       ", enabled) instead.")                                                 \
-  TORCH_API inline void set_##name##_enabled(bool enabled) {                 \
+  inline void set_##name##_enabled(bool enabled) {                           \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
         __func__,                                                            \
@@ -91,7 +91,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
       "at::autocast::get_autocast_" #name                                    \
       "_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(" #device_type \
       ") instead.")                                                          \
-  TORCH_API inline at::ScalarType get_autocast_##name##_dtype() {            \
+  inline at::ScalarType get_autocast_##name##_dtype() {                      \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
         __func__,                                                            \
@@ -104,7 +104,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
       "at::autocast::set_autocast_" #name                                    \
       "_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(" #device_type \
       ", dtype) instead.")                                                   \
-  TORCH_API inline void set_autocast_##name##_dtype(at::ScalarType dtype) {  \
+  inline void set_autocast_##name##_dtype(at::ScalarType dtype) {            \
     TORCH_WARN_DEPRECATION(                                                  \
         "at::autocast::",                                                    \
         __func__,                                                            \
diff --git a/aten/src/ATen/core/jit_type_base.h b/aten/src/ATen/core/jit_type_base.h
index de440787ee68..18077ad9f6b3 100644
--- a/aten/src/ATen/core/jit_type_base.h
+++ b/aten/src/ATen/core/jit_type_base.h
@@ -677,7 +677,7 @@ inline TypePtr Type::withContained(std::vector<TypePtr> contained_types) {
 }
 
 
-TORCH_API inline bool operator==(const Type& lhs, const Type& rhs) {
+inline bool operator==(const Type& lhs, const Type& rhs) {
   if (C10_UNLIKELY(!rhs.symmetric())) {
     return rhs.equals(lhs);
   }
diff --git a/aten/src/ATen/ops/from_blob.h b/aten/src/ATen/ops/from_blob.h
index e82fc4f3e7ab..a209380abb64 100644
--- a/aten/src/ATen/ops/from_blob.h
+++ b/aten/src/ATen/ops/from_blob.h
@@ -5,7 +5,7 @@
 
 namespace at {
 namespace detail {
 
-TORCH_API inline void noopDelete(void*) {}
+inline void noopDelete(void*) {}
 
 } // namespace detail
diff --git a/aten/src/ATen/templates/Functions.h b/aten/src/ATen/templates/Functions.h
index 1f010ccec48b..b1feaf9d4daa 100644
--- a/aten/src/ATen/templates/Functions.h
+++ b/aten/src/ATen/templates/Functions.h
@@ -83,16 +83,16 @@ ${Functions_declarations}
 
 // Special C++ only overloads for std()-like functions (See gh-40287)
 // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
 // So, for example std(0) would select the std(unbiased=False) overload
-TORCH_API inline Tensor var(const Tensor& self, int dim) {
+inline Tensor var(const Tensor& self, int dim) {
   return at::var(self, IntArrayRef{dim});
 }
-TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
+inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
   return at::var_mean(self, IntArrayRef{dim});
 }
-TORCH_API inline Tensor std(const Tensor& self, int dim) {
+inline Tensor std(const Tensor& self, int dim) {
   return at::std(self, IntArrayRef{dim});
 }
-TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
+inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
   return at::std_mean(self, IntArrayRef{dim});
 }
 
diff --git a/c10/core/DispatchKeySet.h b/c10/core/DispatchKeySet.h
index 4de19c9ce5bf..dea4c5a55de7 100644
--- a/c10/core/DispatchKeySet.h
+++ b/c10/core/DispatchKeySet.h
@@ -634,7 +634,7 @@ class DispatchKeySet final {
 
 C10_API std::string toString(DispatchKeySet);
 C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
 
-C10_API inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
+inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
   return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
 }
diff --git a/c10/util/BFloat16.h b/c10/util/BFloat16.h
index 0f7cecda46b6..c4fc5943ccef 100644
--- a/c10/util/BFloat16.h
+++ b/c10/util/BFloat16.h
@@ -111,9 +111,7 @@ struct alignas(2) BFloat16 {
 #endif
 };
 
-C10_API inline std::ostream& operator<<(
-    std::ostream& out,
-    const BFloat16& value) {
+inline std::ostream& operator<<(std::ostream& out, const BFloat16& value) {
   out << (float)value;
   return out;
 }
diff --git a/c10/util/Float8_e4m3fn.h b/c10/util/Float8_e4m3fn.h
index af1119654083..529a04f24d56 100644
--- a/c10/util/Float8_e4m3fn.h
+++ b/c10/util/Float8_e4m3fn.h
@@ -228,9 +228,7 @@ struct alignas(1) Float8_e4m3fn {
   inline C10_HOST_DEVICE bool isnan() const;
 };
 
-C10_API inline std::ostream& operator<<(
-    std::ostream& out,
-    const Float8_e4m3fn& value) {
+inline std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value) {
   out << (float)value;
   return out;
 }
diff --git a/c10/util/Float8_e4m3fnuz.h b/c10/util/Float8_e4m3fnuz.h
index 6738e9d73c40..f5de58f12a11 100644
--- a/c10/util/Float8_e4m3fnuz.h
+++ b/c10/util/Float8_e4m3fnuz.h
@@ -127,7 +127,7 @@ struct alignas(1) Float8_e4m3fnuz {
   inline C10_HOST_DEVICE bool isnan() const;
 };
 
-C10_API inline std::ostream& operator<<(
+inline std::ostream& operator<<(
     std::ostream& out,
     const Float8_e4m3fnuz& value) {
   out << (float)value;
diff --git a/c10/util/Float8_e5m2.h b/c10/util/Float8_e5m2.h
index 442b7ee87e3a..8f70b77bcd6e 100644
--- a/c10/util/Float8_e5m2.h
+++ b/c10/util/Float8_e5m2.h
@@ -136,9 +136,7 @@ struct alignas(1) Float8_e5m2 {
   inline C10_HOST_DEVICE bool isinf() const;
 };
 
-C10_API inline std::ostream& operator<<(
-    std::ostream& out,
-    const Float8_e5m2& value) {
+inline std::ostream& operator<<(std::ostream& out, const Float8_e5m2& value) {
   out << (float)value;
   return out;
 }
diff --git a/c10/util/Float8_e5m2fnuz.h b/c10/util/Float8_e5m2fnuz.h
index 145464e2cfff..9b8c2505ab1f 100644
--- a/c10/util/Float8_e5m2fnuz.h
+++ b/c10/util/Float8_e5m2fnuz.h
@@ -126,7 +126,7 @@ struct alignas(1) Float8_e5m2fnuz {
   inline C10_HOST_DEVICE bool isinf() const;
 };
 
-C10_API inline std::ostream& operator<<(
+inline std::ostream& operator<<(
     std::ostream& out,
     const Float8_e5m2fnuz& value) {
   out << (float)value;
diff --git a/c10/util/Float8_e8m0fnu.h b/c10/util/Float8_e8m0fnu.h
index 91db84091740..0ae3b5012f8f 100644
--- a/c10/util/Float8_e8m0fnu.h
+++ b/c10/util/Float8_e8m0fnu.h
@@ -108,7 +108,7 @@ struct alignas(1) Float8_e8m0fnu {
   inline C10_HOST_DEVICE bool isnan() const;
 };
 
-C10_API inline std::ostream& operator<<(
+inline std::ostream& operator<<(
    std::ostream& out,
    const Float8_e8m0fnu& value) {
   out << (float)value;
diff --git a/c10/util/Half.h b/c10/util/Half.h
index 373881f21e58..bdcf74581452 100644
--- a/c10/util/Half.h
+++ b/c10/util/Half.h
@@ -414,7 +414,7 @@ struct alignas(2) Half {
 #endif
 };
 
-C10_API inline std::ostream& operator<<(std::ostream& out, const Half& value) {
+inline std::ostream& operator<<(std::ostream& out, const Half& value) {
   out << (float)value;
   return out;
 }
diff --git a/c10/util/Logging.h b/c10/util/Logging.h
index 0d6a1b326322..2a08b1f1ce69 100644
--- a/c10/util/Logging.h
+++ b/c10/util/Logging.h
@@ -79,7 +79,7 @@ C10_API void UpdateLoggingLevelsFromFlags();
     const char* msg,
     const void* caller = nullptr);
 
-[[noreturn]] C10_API inline void ThrowEnforceNotMet(
+[[noreturn]] inline void ThrowEnforceNotMet(
     const char* file,
     const int line,
     const char* condition,
@@ -102,7 +102,7 @@ C10_API void UpdateLoggingLevelsFromFlags();
     const char* msg,
     const void* caller = nullptr);
 
-[[noreturn]] C10_API inline void ThrowEnforceFiniteNotMet(
+[[noreturn]] inline void ThrowEnforceFiniteNotMet(
     const char* file,
     const int line,
     const char* condition,
diff --git a/c10/util/int128.h b/c10/util/int128.h
index 7da595b79178..4bea5a5f1197 100644
--- a/c10/util/int128.h
+++ b/c10/util/int128.h
@@ -154,23 +154,23 @@ inline bool operator!=(const uint128& lhs, const uint128& rhs) {
   return !(lhs == rhs);
 }
 
-C10_API inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(uint64_t top, uint64_t bottom)
+inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
+inline UINT128_CONSTEXPR uint128::uint128(uint64_t top, uint64_t bottom)
     : lo_(bottom), hi_(top) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
+inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
     : lo_(v.lo), hi_(v.hi) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(uint64_t bottom)
+inline UINT128_CONSTEXPR uint128::uint128(uint64_t bottom)
     : lo_(bottom), hi_(0) {}
 #ifndef SWIG
-C10_API inline UINT128_CONSTEXPR uint128::uint128(uint32_t bottom)
+inline UINT128_CONSTEXPR uint128::uint128(uint32_t bottom)
     : lo_(bottom), hi_(0) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(int bottom)
+inline UINT128_CONSTEXPR uint128::uint128(int bottom)
     : lo_(bottom), hi_(static_cast<int64_t>((bottom < 0) ? -1 : 0)) {}
 #endif
 #undef UINT128_CONSTEXPR
 
-C10_API inline void uint128::Initialize(uint64_t top, uint64_t bottom) {
+inline void uint128::Initialize(uint64_t top, uint64_t bottom) {
   hi_ = top;
   lo_ = bottom;
 }
@@ -226,11 +226,11 @@ LOGIC128(^)
 
 #undef LOGIC128
 
-#define LOGICASSIGN128(op)                                               \
-  C10_API inline uint128& uint128::operator op(const uint128 & other) { \
-    hi_ op other.hi_;                                                   \
-    lo_ op other.lo_;                                                   \
-    return *this;                                                       \
+#define LOGICASSIGN128(op)                                       \
+  inline uint128& uint128::operator op(const uint128 & other) { \
+    hi_ op other.hi_;                                           \
+    lo_ op other.lo_;                                           \
+    return *this;                                               \
   }
 
 LOGICASSIGN128(|=)
@@ -295,7 +295,7 @@ inline uint128& operator<<=(uint128& self, int amount) {
   return self;
 }
 
-C10_API inline uint128& uint128::operator>>=(int amount) {
+inline uint128& uint128::operator>>=(int amount) {
   // uint64_t shifts of >= 64 are undefined, so we will need some
   // special-casing.
   if (amount < 64) {
@@ -333,7 +333,7 @@ inline uint128 operator%(const uint128& lhs, const uint128& rhs) {
   return uint128(lhs) %= rhs;
 }
 
-C10_API inline uint128& uint128::operator+=(const uint128& b) {
+inline uint128& uint128::operator+=(const uint128& b) {
   hi_ += b.hi_;
   uint64_t lolo = lo_ + b.lo_;
   if (lolo < lo_)
@@ -342,7 +342,7 @@ C10_API inline uint128& uint128::operator+=(const uint128& b) {
   return *this;
 }
 
-C10_API inline uint128& uint128::operator-=(const uint128& b) {
+inline uint128& uint128::operator-=(const uint128& b) {
   hi_ -= b.hi_;
   if (b.lo_ > lo_)
     --hi_;
@@ -350,7 +350,7 @@ C10_API inline uint128& uint128::operator-=(const uint128& b) {
   return *this;
 }
 
-C10_API inline uint128& uint128::operator*=(const uint128& b) {
+inline uint128& uint128::operator*=(const uint128& b) {
   uint64_t a96 = hi_ >> 32;
   uint64_t a64 = hi_ & 0xffffffffu;
   uint64_t a32 = lo_ >> 32;
@@ -373,24 +373,24 @@ C10_API inline uint128& uint128::operator*=(const uint128& b) {
   return *this;
 }
 
-C10_API inline uint128 uint128::operator++(int) {
+inline uint128 uint128::operator++(int) {
   uint128 tmp(*this);
   *this += 1;
   return tmp;
 }
 
-C10_API inline uint128 uint128::operator--(int) {
+inline uint128 uint128::operator--(int) {
   uint128 tmp(*this);
   *this -= 1;
   return tmp;
 }
 
-C10_API inline uint128& uint128::operator++() {
+inline uint128& uint128::operator++() {
   *this += 1;
   return *this;
 }
 
-C10_API inline uint128& uint128::operator--() {
+inline uint128& uint128::operator--() {
   *this -= 1;
   return *this;
 }
diff --git a/cmake/public/utils.cmake b/cmake/public/utils.cmake
index d56dd74d6c02..a553a2936f79 100644
--- a/cmake/public/utils.cmake
+++ b/cmake/public/utils.cmake
@@ -418,6 +418,7 @@ function(torch_compile_options libname)
   if(WERROR)
     list(APPEND private_compile_options
       -Werror
+      -Werror=ignored-attributes
      -Werror=inconsistent-missing-override
      -Werror=inconsistent-missing-destructor-override
      -Werror=pedantic
diff --git a/torch/csrc/autograd/FunctionsManual.h b/torch/csrc/autograd/FunctionsManual.h
index 1bbad0ae92dd..0b659973ec34 100644
--- a/torch/csrc/autograd/FunctionsManual.h
+++ b/torch/csrc/autograd/FunctionsManual.h
@@ -35,9 +35,7 @@ TORCH_API Tensor toNonOptFwGrad(const std::optional<Tensor>& t);
 TORCH_API Tensor toNonOptPrimal(const std::optional<Tensor>& t);
 TORCH_API Tensor toNonOptTensor(const std::optional<Tensor>& t);
 
-TORCH_API inline std::optional<Tensor> wrap_opt_if(
-    const Tensor& t,
-    const bool cond) {
+inline std::optional<Tensor> wrap_opt_if(const Tensor& t, const bool cond) {
   using OptTensor = std::optional<Tensor>;
   return cond ? OptTensor(t) : static_cast<OptTensor>(std::nullopt);
 }
diff --git a/torch/csrc/jit/runtime/static/impl.h b/torch/csrc/jit/runtime/static/impl.h
index f92a28d5d6cf..b25f63c939b0 100644
--- a/torch/csrc/jit/runtime/static/impl.h
+++ b/torch/csrc/jit/runtime/static/impl.h
@@ -29,7 +29,7 @@ TORCH_API std::string dumpValueSet(
     const c10::FastSet<const Value*>& value_set,
     const char* set_name = "");
 
-TORCH_API inline bool doesNotHeapAllocateWhenStoredInIValue(const Type& type) {
+inline bool doesNotHeapAllocateWhenStoredInIValue(const Type& type) {
   switch (type.kind()) {
     // NOTE: NumberType may allocate because it includes complex.
     case TypeKind::NoneType:
@@ -44,11 +44,11 @@ TORCH_API inline bool doesNotHeapAllocateWhenStoredInIValue(const Type& type) {
   }
 }
 
-TORCH_API inline c10::Symbol getStaticRuntimeMetadataSymbol() {
+inline c10::Symbol getStaticRuntimeMetadataSymbol() {
   return Symbol::attr("static_runtime::metadata");
 }
 
-TORCH_API inline bool borrowsOutputs(c10::Symbol kind) {
+inline bool borrowsOutputs(c10::Symbol kind) {
   static const std::array symbols_with_borrowed_outputs = {
       c10::Symbol::fromQualString("static_runtime::select_tensor"),
       c10::Symbol::fromQualString("static_runtime::dict_unpack"),
diff --git a/torch/csrc/jit/serialization/pickler_helper.h b/torch/csrc/jit/serialization/pickler_helper.h
index c1ac5f6feb03..9a52585254eb 100644
--- a/torch/csrc/jit/serialization/pickler_helper.h
+++ b/torch/csrc/jit/serialization/pickler_helper.h
@@ -202,7 +202,7 @@ inline void setTensorMetadata(
 }
 
 // Register function pointer of Tensor BackendMetadata for serialization.
-TORCH_API inline void TensorBackendMetaRegistry(
+inline void TensorBackendMetaRegistry(
     c10::DeviceType t,
     const BackendMetaPtr& get_fptr,
     const BackendMetaPtr& set_fptr) {
diff --git a/torch/csrc/monitor/events.h b/torch/csrc/monitor/events.h
index 1c54373342df..2ec89251c62e 100644
--- a/torch/csrc/monitor/events.h
+++ b/torch/csrc/monitor/events.h
@@ -35,7 +35,7 @@ struct TORCH_API Event {
   std::unordered_map<std::string, data_value_t> data;
 };
 
-TORCH_API inline bool operator==(const Event& lhs, const Event& rhs) {
+inline bool operator==(const Event& lhs, const Event& rhs) {
   return lhs.name == rhs.name && lhs.timestamp == rhs.timestamp &&
       lhs.data == rhs.data;
 }
diff --git a/torch/csrc/profiler/util.h b/torch/csrc/profiler/util.h
index 03c6ef869a0c..f2ae57fa0e59 100644
--- a/torch/csrc/profiler/util.h
+++ b/torch/csrc/profiler/util.h
@@ -43,7 +43,7 @@ TORCH_API void logSoftAssert(
     uint32_t line,
     const char* cond,
     const char* args);
-TORCH_API inline void logSoftAssert(
+inline void logSoftAssert(
     const char* func,
     const char* file,
     uint32_t line,