Fix 'dllimport attribute ignored on inline function' (#157670)

Windows builds produce a large number of warnings of this form:
```
C:\actions-runner\_work\pytorch\pytorch\build\aten\src\ATen\core\TensorBody.h(5043,29): warning: 'at::Tensor::less_' redeclared inline; 'dllimport' attribute ignored [-Wignored-attributes]
 5043 | inline at::Tensor & Tensor::less_(const at::Scalar & other) const {
      |                             ^
C:\actions-runner\_work\pytorch\pytorch\build\aten\src\ATen\core\TensorBody.h(5048,29): warning: 'at::Tensor::less_' redeclared inline; 'dllimport' attribute ignored [-Wignored-attributes]
 5048 | inline at::Tensor & Tensor::less_(const at::Tensor & other) const
```
This PR fixes them all by dropping the export macro from header-defined inline functions, and promotes the warning to an error by adding `-Werror=ignored-attributes` to the `WERROR` compile options so the pattern cannot reappear unnoticed.
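For context: on Windows, `TORCH_API` expands to `__declspec(dllimport)` in translation units that consume the library, and that attribute is meaningless on a function whose body already lives in the header, so clang ignores it and warns. A minimal sketch of the pattern and the fix — `MY_API`, `Widget`, and `f` below are illustrative stand-ins, not PyTorch's actual definitions (the real macros live in `c10/macros/Export.h`):

```cpp
// Sketch of a typical Windows export macro.
#if defined(_WIN32) && defined(BUILDING_MY_LIB)
#define MY_API __declspec(dllexport) // while building the DLL
#elif defined(_WIN32)
#define MY_API __declspec(dllimport) // while consuming the DLL
#else
#define MY_API
#endif

// The TensorBody.h case: the class is marked dllimport in consuming TUs...
struct MY_API Widget {
  bool ok() const;
};

// ...but the method is then defined inline in the same header, so no
// imported symbol is ever needed. clang diagnoses exactly this:
//   warning: 'Widget::ok' redeclared inline; 'dllimport' attribute
//   ignored [-Wignored-attributes]
inline bool Widget::ok() const {
  return true;
}

// The fix applied throughout this PR: a function defined inline in a header
// needs no import/export annotation at all, because every translation unit
// compiles its own copy of the body. `TORCH_API inline bool f() { ... }`
// therefore becomes simply:
inline bool f() {
  return true;
}
```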

Pull Request resolved: https://github.com/pytorch/pytorch/pull/157670
Approved by: https://github.com/albanD
Author: cyy
Date: 2025-07-07 16:57:43 +00:00
Committed by: PyTorch MergeBot
Parent: b3b4d28f4c
Commit: 7c1f627828

23 changed files with 57 additions and 66 deletions

```diff
@@ -30,7 +30,7 @@ TORCH_API bool isAccelerator(c10::DeviceType device_type);
 template <
     typename... T,
     typename = std::enable_if_t<(std::is_same_v<T, c10::DeviceType> && ...)>>
-TORCH_API inline bool isAcceleratorExcluded(
+inline bool isAcceleratorExcluded(
     c10::DeviceType device_type,
     c10::DeviceType first_excluded,
     T... rest_excluded) {
```

```diff
@@ -300,7 +300,7 @@ struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl {
 namespace functionalization {
 namespace impl {
-TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
+inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper(
     const Tensor& tensor) {
   auto functional_impl =
       static_cast<FunctionalTensorWrapper*>(tensor.unsafeGetTensorImpl());
```

```diff
@@ -167,14 +167,14 @@ TORCH_API TensorImpl* propagate_names(
 TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
-TORCH_API inline void propagate_names(
+inline void propagate_names(
     const TensorBase& result,
     DimnameList names,
     bool validate_names = false) {
   propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
 }
-TORCH_API inline void propagate_names_if_nonempty(
+inline void propagate_names_if_nonempty(
     const TensorBase& result,
     DimnameList names,
     bool validate_names = false) {
@@ -182,9 +182,7 @@ TORCH_API inline void propagate_names_if_nonempty(
       result.unsafeGetTensorImpl(), names, validate_names);
 }
-TORCH_API inline void propagate_names(
-    const TensorBase& result,
-    const TensorBase& src) {
+inline void propagate_names(const TensorBase& result, const TensorBase& src) {
   propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
 }
```

```diff
@@ -25,7 +25,7 @@ TORCH_API void set_autocast_cache_enabled(bool enabled);
 // deprecated CUDA-specific autocast APIs
 C10_DEPRECATED_MESSAGE(
     "at::autocast::is_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
-TORCH_API inline bool is_enabled() {
+inline bool is_enabled() {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -34,7 +34,7 @@ TORCH_API inline bool is_enabled() {
 }
 C10_DEPRECATED_MESSAGE(
     "at::autocast::set_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
-TORCH_API inline void set_enabled(bool enabled) {
+inline void set_enabled(bool enabled) {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -43,7 +43,7 @@ TORCH_API inline void set_enabled(bool enabled) {
 }
 C10_DEPRECATED_MESSAGE(
     "at::autocast::get_autocast_gpu_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
-TORCH_API inline at::ScalarType get_autocast_gpu_dtype() {
+inline at::ScalarType get_autocast_gpu_dtype() {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -52,7 +52,7 @@ TORCH_API inline at::ScalarType get_autocast_gpu_dtype() {
 }
 C10_DEPRECATED_MESSAGE(
     "at::autocast::set_autocast_gpu_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")
-TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
+inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
   TORCH_WARN_DEPRECATION(
       "at::autocast::",
       __func__,
@@ -65,7 +65,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
       "at::autocast::is_" #name \
       "_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(" #device_type \
       ") instead.") \
-  TORCH_API inline bool is_##name##_enabled() { \
+  inline bool is_##name##_enabled() { \
     TORCH_WARN_DEPRECATION( \
         "at::autocast::", \
         __func__, \
@@ -78,7 +78,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
      "at::autocast::set_" #name \
      "_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(" #device_type \
      ", enabled) instead.") \
-  TORCH_API inline void set_##name##_enabled(bool enabled) { \
+  inline void set_##name##_enabled(bool enabled) { \
    TORCH_WARN_DEPRECATION( \
        "at::autocast::", \
        __func__, \
@@ -91,7 +91,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
      "at::autocast::get_autocast_" #name \
      "_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(" #device_type \
      ") instead.") \
-  TORCH_API inline at::ScalarType get_autocast_##name##_dtype() { \
+  inline at::ScalarType get_autocast_##name##_dtype() { \
    TORCH_WARN_DEPRECATION( \
        "at::autocast::", \
        __func__, \
@@ -104,7 +104,7 @@ TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
      "at::autocast::set_autocast_" #name \
      "_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(" #device_type \
      ", dtype) instead.") \
-  TORCH_API inline void set_autocast_##name##_dtype(at::ScalarType dtype) { \
+  inline void set_autocast_##name##_dtype(at::ScalarType dtype) { \
    TORCH_WARN_DEPRECATION( \
        "at::autocast::", \
        __func__, \
```

```diff
@@ -677,7 +677,7 @@ inline TypePtr Type::withContained(std::vector<TypePtr> contained_types) {
 }
-TORCH_API inline bool operator==(const Type& lhs, const Type& rhs) {
+inline bool operator==(const Type& lhs, const Type& rhs) {
   if (C10_UNLIKELY(!rhs.symmetric())) {
     return rhs.equals(lhs);
   }
```

```diff
@@ -5,7 +5,7 @@ namespace at {
 namespace detail {
-TORCH_API inline void noopDelete(void*) {}
+inline void noopDelete(void*) {}
 } // namespace detail
```

```diff
@@ -83,16 +83,16 @@ ${Functions_declarations}
 // Special C++ only overloads for std()-like functions (See gh-40287)
 // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
 // So, for example std(0) would select the std(unbiased=False) overload
-TORCH_API inline Tensor var(const Tensor& self, int dim) {
+inline Tensor var(const Tensor& self, int dim) {
   return at::var(self, IntArrayRef{dim});
 }
-TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
+inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
   return at::var_mean(self, IntArrayRef{dim});
 }
-TORCH_API inline Tensor std(const Tensor& self, int dim) {
+inline Tensor std(const Tensor& self, int dim) {
   return at::std(self, IntArrayRef{dim});
 }
-TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
+inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
   return at::std_mean(self, IntArrayRef{dim});
 }
```

```diff
@@ -634,7 +634,7 @@ class DispatchKeySet final {
 C10_API std::string toString(DispatchKeySet);
 C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
-C10_API inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
+inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
   return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
 }
```

```diff
@@ -111,9 +111,7 @@ struct alignas(2) BFloat16 {
 #endif
 };
-C10_API inline std::ostream& operator<<(
-    std::ostream& out,
-    const BFloat16& value) {
+inline std::ostream& operator<<(std::ostream& out, const BFloat16& value) {
   out << (float)value;
   return out;
 }
```

```diff
@@ -228,9 +228,7 @@ struct alignas(1) Float8_e4m3fn {
   inline C10_HOST_DEVICE bool isnan() const;
 };
-C10_API inline std::ostream& operator<<(
-    std::ostream& out,
-    const Float8_e4m3fn& value) {
+inline std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value) {
   out << (float)value;
   return out;
 }
```

```diff
@@ -127,7 +127,7 @@ struct alignas(1) Float8_e4m3fnuz {
   inline C10_HOST_DEVICE bool isnan() const;
 };
-C10_API inline std::ostream& operator<<(
+inline std::ostream& operator<<(
     std::ostream& out,
     const Float8_e4m3fnuz& value) {
   out << (float)value;
```

```diff
@@ -136,9 +136,7 @@ struct alignas(1) Float8_e5m2 {
   inline C10_HOST_DEVICE bool isinf() const;
 };
-C10_API inline std::ostream& operator<<(
-    std::ostream& out,
-    const Float8_e5m2& value) {
+inline std::ostream& operator<<(std::ostream& out, const Float8_e5m2& value) {
   out << (float)value;
   return out;
 }
```

```diff
@@ -126,7 +126,7 @@ struct alignas(1) Float8_e5m2fnuz {
   inline C10_HOST_DEVICE bool isinf() const;
 };
-C10_API inline std::ostream& operator<<(
+inline std::ostream& operator<<(
     std::ostream& out,
     const Float8_e5m2fnuz& value) {
   out << (float)value;
```

```diff
@@ -108,7 +108,7 @@ struct alignas(1) Float8_e8m0fnu {
   inline C10_HOST_DEVICE bool isnan() const;
 };
-C10_API inline std::ostream& operator<<(
+inline std::ostream& operator<<(
     std::ostream& out,
     const Float8_e8m0fnu& value) {
   out << (float)value;
```

```diff
@@ -414,7 +414,7 @@ struct alignas(2) Half {
 #endif
 };
-C10_API inline std::ostream& operator<<(std::ostream& out, const Half& value) {
+inline std::ostream& operator<<(std::ostream& out, const Half& value) {
   out << (float)value;
   return out;
 }
```

```diff
@@ -79,7 +79,7 @@ C10_API void UpdateLoggingLevelsFromFlags();
     const char* msg,
     const void* caller = nullptr);
-[[noreturn]] C10_API inline void ThrowEnforceNotMet(
+[[noreturn]] inline void ThrowEnforceNotMet(
     const char* file,
     const int line,
     const char* condition,
@@ -102,7 +102,7 @@ C10_API void UpdateLoggingLevelsFromFlags();
     const char* msg,
     const void* caller = nullptr);
-[[noreturn]] C10_API inline void ThrowEnforceFiniteNotMet(
+[[noreturn]] inline void ThrowEnforceFiniteNotMet(
     const char* file,
     const int line,
     const char* condition,
```

```diff
@@ -154,23 +154,23 @@ inline bool operator!=(const uint128& lhs, const uint128& rhs) {
   return !(lhs == rhs);
 }
-C10_API inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(uint64_t top, uint64_t bottom)
+inline UINT128_CONSTEXPR uint128::uint128() : lo_(0), hi_(0) {}
+inline UINT128_CONSTEXPR uint128::uint128(uint64_t top, uint64_t bottom)
     : lo_(bottom), hi_(top) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
+inline UINT128_CONSTEXPR uint128::uint128(const uint128_pod& v)
     : lo_(v.lo), hi_(v.hi) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(uint64_t bottom)
+inline UINT128_CONSTEXPR uint128::uint128(uint64_t bottom)
     : lo_(bottom), hi_(0) {}
 #ifndef SWIG
-C10_API inline UINT128_CONSTEXPR uint128::uint128(uint32_t bottom)
+inline UINT128_CONSTEXPR uint128::uint128(uint32_t bottom)
     : lo_(bottom), hi_(0) {}
-C10_API inline UINT128_CONSTEXPR uint128::uint128(int bottom)
+inline UINT128_CONSTEXPR uint128::uint128(int bottom)
     : lo_(bottom), hi_(static_cast<int64_t>((bottom < 0) ? -1 : 0)) {}
 #endif
 #undef UINT128_CONSTEXPR
-C10_API inline void uint128::Initialize(uint64_t top, uint64_t bottom) {
+inline void uint128::Initialize(uint64_t top, uint64_t bottom) {
   hi_ = top;
   lo_ = bottom;
 }
@@ -226,11 +226,11 @@ LOGIC128(^)
 #undef LOGIC128
-#define LOGICASSIGN128(op) \
-  C10_API inline uint128& uint128::operator op(const uint128 & other) { \
-    hi_ op other.hi_; \
-    lo_ op other.lo_; \
-    return *this; \
+#define LOGICASSIGN128(op) \
+  inline uint128& uint128::operator op(const uint128 & other) { \
+    hi_ op other.hi_; \
+    lo_ op other.lo_; \
+    return *this; \
   }
 LOGICASSIGN128(|=)
@@ -295,7 +295,7 @@ inline uint128& operator<<=(uint128& self, int amount) {
   return self;
 }
-C10_API inline uint128& uint128::operator>>=(int amount) {
+inline uint128& uint128::operator>>=(int amount) {
   // uint64_t shifts of >= 64 are undefined, so we will need some
   // special-casing.
   if (amount < 64) {
@@ -333,7 +333,7 @@ inline uint128 operator%(const uint128& lhs, const uint128& rhs) {
   return uint128(lhs) %= rhs;
 }
-C10_API inline uint128& uint128::operator+=(const uint128& b) {
+inline uint128& uint128::operator+=(const uint128& b) {
   hi_ += b.hi_;
   uint64_t lolo = lo_ + b.lo_;
   if (lolo < lo_)
@@ -342,7 +342,7 @@ C10_API inline uint128& uint128::operator+=(const uint128& b) {
   return *this;
 }
-C10_API inline uint128& uint128::operator-=(const uint128& b) {
+inline uint128& uint128::operator-=(const uint128& b) {
   hi_ -= b.hi_;
   if (b.lo_ > lo_)
     --hi_;
@@ -350,7 +350,7 @@ C10_API inline uint128& uint128::operator-=(const uint128& b) {
   return *this;
 }
-C10_API inline uint128& uint128::operator*=(const uint128& b) {
+inline uint128& uint128::operator*=(const uint128& b) {
   uint64_t a96 = hi_ >> 32;
   uint64_t a64 = hi_ & 0xffffffffu;
   uint64_t a32 = lo_ >> 32;
@@ -373,24 +373,24 @@ C10_API inline uint128& uint128::operator*=(const uint128& b) {
   return *this;
 }
-C10_API inline uint128 uint128::operator++(int) {
+inline uint128 uint128::operator++(int) {
   uint128 tmp(*this);
   *this += 1;
   return tmp;
 }
-C10_API inline uint128 uint128::operator--(int) {
+inline uint128 uint128::operator--(int) {
   uint128 tmp(*this);
   *this -= 1;
   return tmp;
 }
-C10_API inline uint128& uint128::operator++() {
+inline uint128& uint128::operator++() {
   *this += 1;
   return *this;
 }
-C10_API inline uint128& uint128::operator--() {
+inline uint128& uint128::operator--() {
   *this -= 1;
   return *this;
 }
```

```diff
@@ -418,6 +418,7 @@ function(torch_compile_options libname)
     if(WERROR)
       list(APPEND private_compile_options
           -Werror
+          -Werror=ignored-attributes
           -Werror=inconsistent-missing-override
           -Werror=inconsistent-missing-destructor-override
           -Werror=pedantic
```
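With this flag in place under `WERROR`, any future reintroduction of the `dllimport`-on-inline pattern fails the build outright instead of scrolling past as a warning.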

```diff
@@ -35,9 +35,7 @@ TORCH_API Tensor toNonOptFwGrad(const std::optional<Tensor>& t);
 TORCH_API Tensor toNonOptPrimal(const std::optional<Tensor>& t);
 TORCH_API Tensor toNonOptTensor(const std::optional<Tensor>& t);
-TORCH_API inline std::optional<Tensor> wrap_opt_if(
-    const Tensor& t,
-    const bool cond) {
+inline std::optional<Tensor> wrap_opt_if(const Tensor& t, const bool cond) {
   using OptTensor = std::optional<Tensor>;
   return cond ? OptTensor(t) : static_cast<OptTensor>(std::nullopt);
 }
```

```diff
@@ -29,7 +29,7 @@ TORCH_API std::string dumpValueSet(
     const c10::FastSet<const Value*>& value_set,
     const char* set_name = "");
-TORCH_API inline bool doesNotHeapAllocateWhenStoredInIValue(const Type& type) {
+inline bool doesNotHeapAllocateWhenStoredInIValue(const Type& type) {
   switch (type.kind()) {
     // NOTE: NumberType may allocate because it includes complex.
     case TypeKind::NoneType:
@@ -44,11 +44,11 @@ TORCH_API inline bool doesNotHeapAllocateWhenStoredInIValue(const Type& type) {
   }
 }
-TORCH_API inline c10::Symbol getStaticRuntimeMetadataSymbol() {
+inline c10::Symbol getStaticRuntimeMetadataSymbol() {
   return Symbol::attr("static_runtime::metadata");
 }
-TORCH_API inline bool borrowsOutputs(c10::Symbol kind) {
+inline bool borrowsOutputs(c10::Symbol kind) {
   static const std::array<c10::Symbol, 4> symbols_with_borrowed_outputs = {
       c10::Symbol::fromQualString("static_runtime::select_tensor"),
       c10::Symbol::fromQualString("static_runtime::dict_unpack"),
```

```diff
@@ -202,7 +202,7 @@ inline void setTensorMetadata(
 }
 // Register function pointer of Tensor BackendMetadata for serialization.
-TORCH_API inline void TensorBackendMetaRegistry(
+inline void TensorBackendMetaRegistry(
     c10::DeviceType t,
     const BackendMetaPtr& get_fptr,
     const BackendMetaPtr& set_fptr) {
```

```diff
@@ -35,7 +35,7 @@ struct TORCH_API Event {
   std::unordered_map<std::string, data_value_t> data;
 };
-TORCH_API inline bool operator==(const Event& lhs, const Event& rhs) {
+inline bool operator==(const Event& lhs, const Event& rhs) {
   return lhs.name == rhs.name && lhs.timestamp == rhs.timestamp &&
       lhs.data == rhs.data;
 }
```

```diff
@@ -43,7 +43,7 @@ TORCH_API void logSoftAssert(
     uint32_t line,
     const char* cond,
     const char* args);
-TORCH_API inline void logSoftAssert(
+inline void logSoftAssert(
     const char* func,
     const char* file,
     uint32_t line,
```