std::value/std::type -> std::_v/std::_t (#138746)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/138746
Approved by: https://github.com/cyyever, https://github.com/malfet
Author: Richard Barnes
Date: 2024-10-26 20:59:22 +00:00
Committed by: PyTorch MergeBot
Parent: fb36daac9f
Commit: 42994234a6
69 changed files with 233 additions and 238 deletions
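
The rename is purely mechanical: since C++14 every std::trait<T>::type has a std::trait_t<T> alias template, and since C++17 every std::trait<T>::value has a std::trait_v<T> variable template, so the two spellings are interchangeable. A minimal sketch of the equivalence (illustration only, not part of the diff):

#include <type_traits>

// Long-hand member access and the _v / _t shorthands name the same thing;
// the commit only swaps the spelling.
static_assert(std::is_same<int, int>::value, "member ::value");
static_assert(std::is_same_v<int, int>, "variable template _v");
static_assert(std::is_same_v<std::remove_pointer<int*>::type, int>, "member ::type");
static_assert(std::is_same_v<std::remove_pointer_t<int*>, int>, "alias template _t");

Besides being shorter, the _v / _t forms avoid the typename and ::value boilerplate in dependent contexts, which is the usual motivation for sweeps like this one.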


@ -80,7 +80,7 @@ public:
template<class Value_>
void setValue(Value_&& value) const {
static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of setValue()");
static_assert(std::is_constructible_v<Value, Value_>, "Wrong type for the value argument of setValue()");
iterator_->second = Value(std::forward<Value_>(value));
}


@ -69,8 +69,8 @@ Dict<Key, Value>::Dict()
:Dict(make_intrusive<detail::DictImpl>(
detail::DictImpl::dict_map_type(),
detail::DictImpl::DictElementTypes{getTypePtr<Key>(), getTypePtr<Value>()})) {
static_assert(!std::is_same<Key, IValue>::value, "This constructor is not valid for Dict<IValue, _>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
static_assert(!std::is_same<Value, IValue>::value, "This constructor is not valid for Dict<_, IValue>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
static_assert(!std::is_same_v<Key, IValue>, "This constructor is not valid for Dict<IValue, _>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
static_assert(!std::is_same_v<Value, IValue>, "This constructor is not valid for Dict<_, IValue>. Please use c10::impl::GenericDict(keyType, valueType) instead.");
}
template<class Key, class Value>
@ -78,8 +78,8 @@ Dict<Key, Value>::Dict(TypePtr keyType, TypePtr valueType)
: Dict(make_intrusive<detail::DictImpl>(
detail::DictImpl::dict_map_type(),
detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) {
static_assert(std::is_same<Key, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
static_assert(std::is_same<Value, IValue>::value, "This constructor is only valid for c10::impl::GenericDict.");
static_assert(std::is_same_v<Key, IValue>, "This constructor is only valid for c10::impl::GenericDict.");
static_assert(std::is_same_v<Value, IValue>, "This constructor is only valid for c10::impl::GenericDict.");
}
template<class Key, class Value>
@ -118,8 +118,8 @@ void Dict<Key, Value>::clear() const {
template<class Key, class Value>
template<class Key_, class Value_>
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert(Key_&& key, Value_&& value) const {
static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert");
static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert");
static_assert(std::is_constructible_v<Key, Key_>, "Wrong type for the key argument of Dict::insert");
static_assert(std::is_constructible_v<Value, Value_>, "Wrong type for the value argument of Dict::insert");
auto inserted = impl_->dict.emplace(
Key(std::forward<Key_>(key)),
Value(std::forward<Value_>(value)));
@ -129,8 +129,8 @@ std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert(Ke
template<class Key, class Value>
template<class Key_, class Value_>
std::pair<typename Dict<Key, Value>::iterator, bool> Dict<Key, Value>::insert_or_assign(Key_&& key, Value_&& value) const {
static_assert(std::is_constructible<Key, Key_>::value, "Wrong type for the key argument of Dict::insert_or_assign");
static_assert(std::is_constructible<Value, Value_>::value, "Wrong type for the value argument of Dict::insert_or_assign");
static_assert(std::is_constructible_v<Key, Key_>, "Wrong type for the key argument of Dict::insert_or_assign");
static_assert(std::is_constructible_v<Value, Value_>, "Wrong type for the value argument of Dict::insert_or_assign");
auto inserted = impl_->dict.insert_or_assign(
Key(std::forward<Key_>(key)),
Value(std::forward<Value_>(value)));


@ -42,10 +42,10 @@ struct uniform_int_from_to_distribution {
template <typename RNG>
C10_HOST_DEVICE inline T operator()(RNG generator) {
if ((
std::is_same<T, int64_t>::value ||
std::is_same<T, double>::value ||
std::is_same<T, float>::value ||
std::is_same<T, at::BFloat16>::value) && range_ >= 1ULL << 32)
std::is_same_v<T, int64_t> ||
std::is_same_v<T, double> ||
std::is_same_v<T, float> ||
std::is_same_v<T, at::BFloat16>) && range_ >= 1ULL << 32)
{
return transformation::uniform_int_from_to<T>(generator->random64(), range_, base_);
} else {


@ -21,7 +21,7 @@ List<T>::List()
: List(make_intrusive<c10::detail::ListImpl>(
typename c10::detail::ListImpl::list_type(),
getTypePtr<T>())) {
static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType) instead.");
static_assert(!std::is_same_v<T, IValue>, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType) instead.");
}
template<class T>
@ -29,7 +29,7 @@ List<T>::List(ArrayRef<T> values)
: List(make_intrusive<c10::detail::ListImpl>(
typename c10::detail::ListImpl::list_type(),
getTypePtr<T>())) {
static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
static_assert(!std::is_same_v<T, IValue>, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
impl_->list.reserve(values.size());
for (const T& element : values) {
impl_->list.push_back(element);
@ -39,7 +39,7 @@ List<T>::List(ArrayRef<T> values)
template<class T>
List<T>::List(std::initializer_list<T> initial_values)
: List(ArrayRef<T>(initial_values)) {
static_assert(!std::is_same<T, IValue>::value, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
static_assert(!std::is_same_v<T, IValue>, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
}
template<class T>
@ -47,7 +47,7 @@ List<T>::List(TypePtr elementType)
: List(make_intrusive<c10::detail::ListImpl>(
typename c10::detail::ListImpl::list_type(),
std::move(elementType))) {
static_assert(std::is_same<T, IValue>::value || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
static_assert(std::is_same_v<T, IValue> || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
"This constructor is only valid for c10::impl::GenericList or List<Future>.");
}


@ -72,7 +72,7 @@ template <typename T>
auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t<T> {
// Return the grad argument in case of a hook with void return type to have an
// std::function with Tensor return type
static_assert(std::is_same<decltype(hook(Tensor())), void>::value,
static_assert(std::is_same_v<decltype(hook(Tensor())), void>,
"Expected hook to return void");
return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) {
TensorRef grad(grad_base);


@ -95,7 +95,7 @@ class TORCH_API Blob final : public c10::intrusive_ptr_target {
template <class T>
T* GetMutable() {
static_assert(
std::is_default_constructible<T>::value,
std::is_default_constructible_v<T>,
"GetMutable can't be called with non-default-constructible types. "
"Try using specialized methods");
if (IsType<T>()) {


@ -80,7 +80,7 @@ inline BoxedKernel BoxedKernel::makeNamedNotSupported() {
template<class KernelFunctor>
inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call BoxedKernel::makeFromFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_base_of_v<OperatorKernel, KernelFunctor>, "Tried to call BoxedKernel::makeFromFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
return BoxedKernel(
std::move(kernelFunctor),
[](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) {


@ -162,7 +162,7 @@ inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr<Ope
// This assertion is costly for build time so it's debug-gated.
static_assert(guts::is_functor<KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor> but the argument is not a functor.");
#endif
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_base_of_v<OperatorKernel, KernelFunctor>, "Tried to call KernelFunction::makeFromUnboxedFunctor<KernelFunctor>, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed<KernelFunctor>::call;
void* void_unboxed_fn = reinterpret_cast<void*>(unboxed_fn);
@ -184,7 +184,7 @@ inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr<Kerne
template<class FuncPtr, bool AllowLegacyTypes>
inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) {
static_assert(is_compile_time_function_pointer<FuncPtr>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN.");
static_assert(!std::is_same<typename FuncPtr::FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
static_assert(!std::is_same_v<typename FuncPtr::FuncType, BoxedKernelFunction>, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
#if defined(__GNUC__) && defined(__SANITIZE_ADDRESS__) && !defined(__CUDACC__)
TORCH_INTERNAL_ASSERT(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr");
#else
@ -207,7 +207,7 @@ inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr)
template<bool AllowLegacyTypes, class FuncType>
inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) {
static_assert(guts::is_function_type<FuncType>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type.");
static_assert(!std::is_same<FuncType, BoxedKernelFunction>::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
static_assert(!std::is_same_v<FuncType, BoxedKernelFunction>, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead.");
TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr");
return makeFromUnboxedFunctor<AllowLegacyTypes, impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>(


@ -383,7 +383,7 @@ struct BoxedKernelWrapper<
// that the last RetCount elements are of type `Tensor&`.
auto result = guts::tuple_take<ArgTuple, -RetCount>(ArgTuple{std::forward<Args>(args)...});
static_assert(
std::is_same<Result, decltype(result)>::value,
std::is_same_v<Result, decltype(result)>,
"The parameter list of an op returning a tuple of Tensor references "
"must end with an equal number of Tensor reference parameters."
);


@ -154,39 +154,39 @@ namespace impl {
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<List<T>, AllowDeprecatedTypes>
: assert_is_valid_input_type<T, AllowDeprecatedTypes> {
static_assert(!std::is_same<T, at::Scalar>::value,
static_assert(!std::is_same_v<T, at::Scalar>,
"You tried to register a kernel with an unsupported input type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<c10::ArrayRef<T>, AllowDeprecatedTypes>
: assert_is_valid_input_type<T, AllowDeprecatedTypes> {
static_assert(!std::is_same<T, at::Scalar>::value,
static_assert(!std::is_same_v<T, at::Scalar>,
"You tried to register a kernel with an unsupported input type: ArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<c10::OptionalArrayRef<T>, AllowDeprecatedTypes>
: assert_is_valid_input_type<T, AllowDeprecatedTypes> {
static_assert(!std::is_same<T, at::Scalar>::value,
static_assert(!std::is_same_v<T, at::Scalar>,
"You tried to register a kernel with an unsupported input type: OptionalArrayRef<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
};
template<class T, size_t N, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<std::array<T, N>, AllowDeprecatedTypes>
: assert_is_valid_input_type<T, AllowDeprecatedTypes> {
static_assert(!std::is_same<T, at::Scalar>::value,
static_assert(!std::is_same_v<T, at::Scalar>,
"You tried to register a kernel with an unsupported input type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same_v<float, T>>> {
// There is no reason to support float when we have double. Keep the API lean.
static_assert(guts::false_t<T>::value,
"You tried to register a kernel with an unsupported input type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same_v<const char*, T>>> {
static_assert(guts::false_t<T>::value,
"You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead.");
};
@ -196,12 +196,12 @@ namespace impl {
"You tried to register a kernel with an unsupported input type: vector<bool>. Please use List<bool> instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral_v<T> && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
static_assert(guts::false_t<T>::value,
"You tried to register a kernel with an unsupported integral input type. Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const c10::SymInt&, T>::value>> {
struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same_v<const c10::SymInt&, T>>> {
static_assert(guts::false_t<T>::value,
"You tried to register a kernel taking c10::SymInt by reference. Please accept it by value instead.");
};
@ -238,7 +238,7 @@ namespace impl {
: assert_is_valid_output_type<Value, AllowDeprecatedTypes> {
static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
"You tried to register a kernel with an unsupported output type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
static_assert(!std::is_same<Value, at::Scalar>::value,
static_assert(!std::is_same_v<Value, at::Scalar>,
"You tried to register a kernel with an unsupported output type: Dict<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
};
@ -249,21 +249,21 @@ namespace impl {
"You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
"You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
static_assert(!std::is_same<Value, at::Scalar>::value,
static_assert(!std::is_same_v<Value, at::Scalar>,
"You tried to register a kernel with an unsupported output type: std::unordered_map<Key, Scalar>. Please use Dict<Key, int64_t> or Dict<Key, double>.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<List<T>, AllowDeprecatedTypes>
: assert_is_valid_output_type<T, AllowDeprecatedTypes> {
static_assert(!std::is_same<T, at::Scalar>::value,
static_assert(!std::is_same_v<T, at::Scalar>,
"You tried to register a kernel with an unsupported output type: List<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<std::vector<T>, AllowDeprecatedTypes>
: assert_is_valid_output_type<T, AllowDeprecatedTypes> {
static_assert(!std::is_same<T, at::Scalar>::value,
static_assert(!std::is_same_v<T, at::Scalar>,
"You tried to register a kernel with an unsupported output type: std::vector<Scalar>. Please use List<int64_t>, List<double> or Tensor instead.");
// TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector<T>. Please use List<T> instead.");
};
@ -271,7 +271,7 @@ namespace impl {
template<class T, size_t N, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<std::array<T, N>, AllowDeprecatedTypes>
: assert_is_valid_output_type<T, AllowDeprecatedTypes> {
static_assert(!std::is_same<T, at::Scalar>::value,
static_assert(!std::is_same_v<T, at::Scalar>,
"You tried to register a kernel with an unsupported output type: std::array<Scalar, N>. Please use std::array<int64_t, N> instead.");
};
@ -280,13 +280,13 @@ namespace impl {
// there if they didn't exist, but we can show a better error message
// in some common error scenarios.
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same_v<float, T>>> {
// There is no reason to support float when we have double. Keep the API lean.
static_assert(guts::false_t<T>::value,
"You tried to register a kernel with an unsupported output type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same_v<const char*, T>>> {
static_assert(guts::false_t<T>::value,
"You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead.");
};
@ -296,7 +296,7 @@ namespace impl {
"You tried to register a kernel with an unsupported output type: vector<bool>. Please use List<bool> instead.");
};
template<class T, bool AllowDeprecatedTypes>
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral_v<T> && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> {
static_assert(guts::false_t<T>::value,
"You tried to register a kernel with an unsupported integral output type. Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string.");
};
@ -417,7 +417,7 @@ namespace impl {
struct return_to_ivalue final {};
template<class T, bool AllowDeprecatedTypes>
struct return_to_ivalue<T, AllowDeprecatedTypes, std::enable_if_t<!std::is_same<at::Tensor&, T>::value>> final {
struct return_to_ivalue<T, AllowDeprecatedTypes, std::enable_if_t<!std::is_same_v<at::Tensor&, T>>> final {
static IValue call(T&& v) {
assert_is_valid_output_type<T, AllowDeprecatedTypes>();
return c10::ivalue::from(std::move(v));
@ -564,7 +564,7 @@ namespace impl {
template<class KernelFunctor, bool AllowDeprecatedTypes>
struct make_boxed_from_unboxed_functor final {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value,
static_assert(std::is_base_of_v<OperatorKernel, KernelFunctor>,
"Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) {
@ -574,7 +574,7 @@ namespace impl {
// We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack.
// See Note [Plumbing Keys Through The Dispatcher] for the background.
using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::parameter_types;
constexpr bool has_outputs = !std::is_same<void, ReturnType>::value;
constexpr bool has_outputs = !std::is_same_v<void, ReturnType>;
constexpr size_t num_inputs = guts::typelist::size<ArgTypes>::value;
if constexpr (has_outputs) {
// Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value


@ -37,10 +37,10 @@ constexpr int checkStaticTypes() {
// Give nice error messages for some of the common error cases.
// Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
static_assert(std::conjunction<
bool_t<!std::is_integral<Types>::value || std::is_same<Types, int8_t>::value || std::is_same<Types, int64_t>::value || std::is_same<Types, bool>::value>...
bool_t<!std::is_integral_v<Types> || std::is_same_v<Types, int8_t> || std::is_same_v<Types, int64_t> || std::is_same_v<Types, bool>>...
>::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type");
static_assert(std::conjunction<
bool_t<!std::is_same<Types, float>::value>...
bool_t<!std::is_same_v<Types, float>>...
>::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
return 0;
}
@ -87,7 +87,7 @@ struct createReturns<std::tuple<ReturnTypes...>, void> final {
};
template<class ReturnType>
struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
struct createReturns<ReturnType, std::enable_if_t<!std::is_same_v<void, ReturnType> && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
static constexpr std::array<ArgumentDef, 1> call() {
return createReturns<std::tuple<ReturnType>>::call();
}


@ -159,8 +159,8 @@ public:
template<class KernelFunctor, class... ConstructorParameters>
// enable_if: only enable it if KernelFunctor is actually a functor
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
static_assert(std::is_base_of_v<OperatorKernel, KernelFunctor>, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_constructible_v<KernelFunctor, ConstructorParameters...>, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
return std::move(*this).kernel(
dispatch_key,
@ -211,8 +211,8 @@ public:
template<class KernelFunctor, class... ConstructorParameters>
// enable_if: only enable it if KernelFunctor is actually a functor
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
static_assert(std::is_base_of_v<OperatorKernel, KernelFunctor>, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
static_assert(std::is_constructible_v<KernelFunctor, ConstructorParameters...>, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
return std::move(*this).kernel(
std::nullopt,
@ -239,7 +239,7 @@ public:
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key) && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
return std::move(*this).kernel(
@ -268,7 +268,7 @@ public:
template<class FuncType, FuncType* kernel_func>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
return std::move(*this).kernel(
@ -283,7 +283,7 @@ public:
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
return std::move(*this).kernel(
@ -298,7 +298,7 @@ public:
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
static_assert(!std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
return std::move(*this).kernel(
@ -518,7 +518,7 @@ public:
*/
template<class FuncType>
// enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same_v<FuncType, KernelFunction::BoxedKernelFunction>, RegisterOperators&&>
op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
constexpr bool AllowLegacyTypes = true;
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
@ -549,7 +549,7 @@ public:
// enable_if: only enable it if Lambda is actually a stateless lambda
std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
static_assert(!std::is_base_of_v<OperatorKernel, Lambda>, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
constexpr bool AllowLegacyTypes = true;
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
@ -566,7 +566,7 @@ public:
// enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
static_assert(!std::is_base_of_v<OperatorKernel, Lambda>, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
constexpr bool AllowLegacyTypes = true;
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(


@ -22,18 +22,18 @@ inline namespace CPU_CAPABILITY {
template <typename T>
constexpr bool is_zarch_implemented() {
return (
std::is_same<T, float>::value || std::is_same<T, double>::value ||
std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value ||
std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value);
std::is_same_v<T, float> || std::is_same_v<T, double> ||
std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t> ||
std::is_same_v<T, uint16_t> || std::is_same_v<T, int16_t> ||
std::is_same_v<T, int32_t> || std::is_same_v<T, int64_t>);
}
template <typename T>
constexpr bool is_zarch_implemented_quant() {
return (
std::is_same<T, c10::qint32>::value ||
std::is_same<T, c10::qint8>::value ||
std::is_same<T, c10::quint8>::value);
std::is_same_v<T, c10::qint32> ||
std::is_same_v<T, c10::qint8> ||
std::is_same_v<T, c10::quint8>);
}
template <typename T>
@ -790,14 +790,14 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<!std::is_unsigned<U>::value, int> = 0>
std::enable_if_t<!std::is_unsigned_v<U>, int> = 0>
Vectorized<U> C10_ALWAYS_INLINE abs() const {
return {vec_abs(_vec0), vec_abs(_vec1)};
}
template <
typename U = T,
std::enable_if_t<std::is_unsigned<U>::value, int> = 0>
std::enable_if_t<std::is_unsigned_v<U>, int> = 0>
Vectorized<U> C10_ALWAYS_INLINE abs() const {
return {_vec0, _vec1};
}
@ -828,7 +828,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
Vectorized<U> angle() const {
auto tmp = blendv(
Vectorized<U>(0), Vectorized<U>(c10::pi<U>), *this < Vectorized<U>(0));
@ -837,7 +837,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<!std::is_floating_point_v<U>, int> = 0>
Vectorized<U> angle() const {
return blendv(
Vectorized<U>(0), Vectorized<U>(c10::pi<U>), *this < Vectorized<U>(0));
@ -855,7 +855,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
int zero_mask() const {
auto cmp = (*this == Vectorized<U>(0));
constexpr auto mask_zero_bits = GetBpermZeroMask<U>();
@ -902,7 +902,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_same<U, float>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, float>, int> = 0>
inline Vectorized<T> mapOrdinary(float (*const f)(float)) const {
float a00 = f(_vec0[0]);
float a01 = f(_vec0[1]);
@ -917,14 +917,14 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_same<U, double>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, double>, int> = 0>
inline Vectorized<T> mapOrdinary(double (*const f)(double)) const {
return Vectorized<T>(f(_vec0[0]), f(_vec0[1]), f(_vec1[0]), f(_vec1[1]));
}
template <
typename U = T,
std::enable_if_t<std::is_same<U, float>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, float>, int> = 0>
inline Vectorized<T> mapOrdinary(
float (*const f)(float, float),
const Vectorized<T>& b) const {
@ -941,7 +941,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_same<U, double>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, double>, int> = 0>
inline Vectorized<T> mapOrdinary(
double (*const f)(double, double),
const Vectorized<T>& b) const {
@ -956,7 +956,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
typename FloatOp,
typename DoubleOp,
typename U = T,
std::enable_if_t<std::is_same<U, float>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, float>, int> = 0>
inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d) const {
vtype a0 = f(_vec0);
vtype a1 = f(_vec1);
@ -967,7 +967,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
typename FloatOp,
typename DoubleOp,
typename U = T,
std::enable_if_t<std::is_same<U, double>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, double>, int> = 0>
inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d) const {
return Vectorized<T>(d(_vec0), d(_vec1));
}
@ -976,7 +976,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
typename FloatOp,
typename DoubleOp,
typename U = T,
std::enable_if_t<std::is_same<U, float>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, float>, int> = 0>
inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d, const Vectorized<T>& b)
const {
vtype a0 = f(_vec0, b._vec0);
@ -988,7 +988,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
typename FloatOp,
typename DoubleOp,
typename U = T,
std::enable_if_t<std::is_same<U, double>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, double>, int> = 0>
inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d, const Vectorized<T>& b)
const {
return Vectorized<T>(d(_vec0, b._vec0), d(_vec1, b._vec1));
@ -1112,7 +1112,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<!std::is_floating_point_v<U>, int> = 0>
Vectorized<T> minimum(const Vectorized<T>& other) const {
return {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)};
}
@ -1120,7 +1120,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
/* Propagates NaN if either input is a NaN. */
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
Vectorized<T> minimum(const Vectorized<T>& other) const {
Vectorized<T> tmp = {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)};
tmp = blendv(tmp, *this, isnan());
@ -1129,7 +1129,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<!std::is_floating_point_v<U>, int> = 0>
Vectorized<T> maximum(const Vectorized<T>& other) const {
return {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)};
}
@ -1137,7 +1137,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
/* Propagates NaN if either input is a NaN. */
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
Vectorized<T> maximum(const Vectorized<T>& other) const {
Vectorized<T> tmp = {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)};
tmp = blendv(tmp, *this, isnan());
@ -1146,7 +1146,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<!std::is_floating_point_v<U>, int> = 0>
Vectorized<T> clamp_min(const Vectorized<T>& min) const {
return {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)};
}
@ -1154,7 +1154,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
/* Keeps NaN if actual value is NaN */
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
Vectorized<T> clamp_min(const Vectorized<T>& min) const {
Vectorized<T> tmp = {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)};
return blendv(tmp, *this, isnan());
@ -1162,7 +1162,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<!std::is_floating_point_v<U>, int> = 0>
Vectorized<T> clamp_max(const Vectorized<T>& max) const {
return {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)};
}
@ -1170,7 +1170,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
/* Keeps NaN if actual value is NaN */
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
Vectorized<T> clamp_max(const Vectorized<T>& max) const {
Vectorized<T> tmp = {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)};
return blendv(tmp, *this, isnan());
@ -1178,7 +1178,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_same<U, float>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, float>, int> = 0>
Vectorized<T> swapped() const {
auto swap_mask = GetSwapMaskFloat();
vtype v0 = vec_perm(_vec0, _vec0, swap_mask);
@ -1188,7 +1188,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_same<U, double>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, double>, int> = 0>
Vectorized<T> swapped() const {
vtype v0 = {_vec0[1], _vec0[0]};
vtype v1 = {_vec1[1], _vec1[0]};
@ -1197,7 +1197,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
static Vectorized<T> mergee(Vectorized<T>& first, Vectorized<T>& second) {
return {
vec_mergee(first._vec0, second._vec0),
@ -1206,7 +1206,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
static Vectorized<T> mergeo(Vectorized<T>& first, Vectorized<T>& second) {
return {
vec_mergeo(first._vec0, second._vec0),
@ -1243,21 +1243,21 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
Vectorized<T> mergee() const {
return {vec_mergee(_vec0, _vec0), vec_mergee(_vec1, _vec1)};
}
template <
typename U = T,
std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
std::enable_if_t<std::is_floating_point_v<U>, int> = 0>
Vectorized<T> mergeo() const {
return {vec_mergeo(_vec0, _vec0), vec_mergeo(_vec1, _vec1)};
}
template <
typename U = T,
std::enable_if_t<std::is_same<U, uint8_t>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, uint8_t>, int> = 0>
Vectorized<int32_t> to_vec_float_helper() const {
int32_t values[8] = {
_vec0[0],
@ -1278,7 +1278,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
template <
typename U = T,
std::enable_if_t<std::is_same<U, int32_t>::value, int> = 0>
std::enable_if_t<std::is_same_v<U, int32_t>, int> = 0>
Vectorized<uint8_t> to_vec_uint8_helper() const {
// helper function for float to uint8_t conversion
uint8_t values[8] = {


@ -1384,7 +1384,7 @@ inline void transpose_mxn<BFloat16>(const BFloat16* src, int64_t ld_src, BFloat1
}
template <typename T, int M, int N,
typename std::enable_if_t<std::is_same<T, BFloat16>::value && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
typename std::enable_if_t<std::is_same_v<T, BFloat16> && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
inline void transpose_mxn(const BFloat16* src, int64_t ld_src, BFloat16* dst, int64_t ld_dst) {
transpose_mxn<BFloat16>(src, ld_src, dst, ld_dst, M, N);
}
@ -1426,7 +1426,7 @@ inline void transpose_mxn<Half>(const Half* src, int64_t ld_src, Half* dst, int6
}
template <typename T, int M, int N,
typename std::enable_if_t<std::is_same<T, Half>::value && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
typename std::enable_if_t<std::is_same_v<T, Half> && ((M <= 32 && M != 16) || (N <= 32 && N != 16)), int> = 0>
inline void transpose_mxn(const Half* src, int64_t ld_src, Half* dst, int64_t ld_dst) {
transpose_mxn<Half>(src, ld_src, dst, ld_dst, M, N);
}


@ -701,7 +701,7 @@ inline void transpose_mxn<float>(const float* src, int64_t ld_src, float* dst, i
}
template <typename T, int M, int N,
typename std::enable_if_t<std::is_same<T, float>::value, int> = 0>
typename std::enable_if_t<std::is_same_v<T, float>, int> = 0>
inline void transpose_mxn(const float* src, int64_t ld_src, float* dst, int64_t ld_dst) {
transpose_mxn<float>(src, ld_src, dst, ld_dst, M, N);
}


@ -61,15 +61,15 @@ class ConstCuSparseDescriptor {
#endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS || AT_USE_HIPSPARSE_CONST_DESCRIPTORS
#if defined(USE_ROCM)
using cusparseMatDescr = std::remove_pointer<hipsparseMatDescr_t>::type;
using cusparseDnMatDescr = std::remove_pointer<hipsparseDnMatDescr_t>::type;
using cusparseDnVecDescr = std::remove_pointer<hipsparseDnVecDescr_t>::type;
using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
using cusparseSpMatDescr = std::remove_pointer<hipsparseSpMatDescr_t>::type;
using cusparseSpGEMMDescr = std::remove_pointer<hipsparseSpGEMMDescr_t>::type;
using cusparseMatDescr = std::remove_pointer_t<hipsparseMatDescr_t>;
using cusparseDnMatDescr = std::remove_pointer_t<hipsparseDnMatDescr_t>;
using cusparseDnVecDescr = std::remove_pointer_t<hipsparseDnVecDescr_t>;
using cusparseSpMatDescr = std::remove_pointer_t<hipsparseSpMatDescr_t>;
using cusparseSpMatDescr = std::remove_pointer_t<hipsparseSpMatDescr_t>;
using cusparseSpGEMMDescr = std::remove_pointer_t<hipsparseSpGEMMDescr_t>;
#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
using bsrsv2Info = std::remove_pointer<bsrsv2Info_t>::type;
using bsrsm2Info = std::remove_pointer<bsrsm2Info_t>::type;
using bsrsv2Info = std::remove_pointer_t<bsrsv2Info_t>;
using bsrsm2Info = std::remove_pointer_t<bsrsm2Info_t>;
#endif
#endif


@ -23,7 +23,7 @@ getTensorInfo(const at::TensorBase &t) {
scalar* data_ptr = nullptr;
if constexpr (std::is_const<scalar>::value) {
if constexpr (std::is_const_v<scalar>) {
data_ptr = t.const_data_ptr<scalar>();
} else {
data_ptr = t.mutable_data_ptr<scalar>();


@ -42,9 +42,9 @@ namespace at::native::templates {
template<typename scalar_t>
int64_t update_from(int64_t from) {
static_assert(
std::is_floating_point<scalar_t>::value ||
std::is_same<scalar_t, at::Half>::value ||
std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
std::is_floating_point_v<scalar_t> ||
std::is_same_v<scalar_t, at::Half> ||
std::is_same_v<scalar_t, at::BFloat16>, "scalar_t must be floating-point type");
const auto from_plus_1 = static_cast<int64_t>(static_cast<scalar_t>(from + 1));
if (from_plus_1 < from) {
int64_t from_ = std::abs(from + 1);
@ -59,9 +59,9 @@ int64_t update_from(int64_t from) {
template<typename scalar_t>
int64_t update_to(int64_t to) {
static_assert(
std::is_floating_point<scalar_t>::value ||
std::is_same<scalar_t, at::Half>::value ||
std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
std::is_floating_point_v<scalar_t> ||
std::is_same_v<scalar_t, at::Half> ||
std::is_same_v<scalar_t, at::BFloat16>, "scalar_t must be floating-point type");
const auto to_minus_1 = static_cast<int64_t>(static_cast<scalar_t>(to - 1));
if (to_minus_1 >= to) {
int64_t to_ = std::abs(to - 1);


@ -25,7 +25,7 @@ namespace at::native {
namespace {
template<bool inplace>
using Ctype = typename std::conditional<inplace, Tensor&, Tensor>::type;
using Ctype = typename std::conditional_t<inplace, Tensor&, Tensor>;
Tensor make_feature_noise(const Tensor& input) {
auto input_sizes = input.sym_sizes();


@ -280,7 +280,7 @@ index_select_add(
for (int64_t i = start_idx; i < end_idx; i++) {
// Convert FP32 intermediate buffer result back to 16 bit for
// output dtype
if constexpr (std::is_same<data_t, at::Half>::value) {
if constexpr (std::is_same_v<data_t, at::Half>) {
// FP16
for (const auto d : c10::irange(ddim)) {
(output_data + i * ddim)[d] =
@ -662,7 +662,7 @@ index_select_scale_add(
for (int64_t i = start_idx; i < end_idx; i++) {
// Convert FP32 intermediate buffer result back to 16 bit for
// output dtype
if constexpr (std::is_same<data_t, at::Half>::value) {
if constexpr (std::is_same_v<data_t, at::Half>) {
// FP16
for (const auto d : c10::irange(ddim)) {
(output_data + i * ddim)[d] =


@ -130,7 +130,7 @@ std::tuple<Tensor, Tensor> ctc_loss_cpu_template(const Tensor& log_probs, const
// log_probs: input_len x batch_size x num_labels
// targets [int64]: batch_size x target_length OR sum(target_lengths)
constexpr scalar_t neginf = -std::numeric_limits<scalar_t>::infinity();
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
using target_t = typename std::conditional_t<target_scalar_type == kInt, int, int64_t>;
Tensor neg_log_likelihood, log_alpha;
size_t tg_target_stride;
@ -233,7 +233,7 @@ template<typename scalar_t, ScalarType target_scalar_type>
Tensor ctc_loss_backward_cpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths,
const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) {
constexpr scalar_t neginf = -std::numeric_limits<scalar_t>::infinity();
using target_t = typename std::conditional<target_scalar_type == kInt, int, int64_t>::type;
using target_t = typename std::conditional_t<target_scalar_type == kInt, int, int64_t>;
int64_t max_input_length = log_probs.size(0);
int64_t batch_size = log_probs.size(1);
int64_t num_labels = log_probs.size(2);


@ -147,7 +147,7 @@ inline Tensor optional_contiguous(const Tensor& source) {
// or nullptr if the tensor is undefined.
template <typename scalar_t>
inline scalar_t* optional_data(const Tensor& source) {
if constexpr (std::is_const<scalar_t>::value) {
if constexpr (std::is_const_v<scalar_t>) {
return source.defined() ? source.const_data_ptr<scalar_t>() : nullptr;
} else {
return source.defined() ? source.data_ptr<scalar_t>() : nullptr;


@ -35,7 +35,7 @@ inline Tensor optional_contiguous(const Tensor& source) {
// or nullptr if the tensor is undefined.
template <typename scalar_t>
inline scalar_t* optional_data(const Tensor& source) {
if constexpr (std::is_const<scalar_t>::value) {
if constexpr (std::is_const_v<scalar_t>) {
return source.defined() ? source.const_data_ptr<scalar_t>() : nullptr;
} else {
return source.defined() ? source.data_ptr<scalar_t>() : nullptr;


@ -625,7 +625,7 @@ static scalar_t _igam_helper_fac(scalar_t a, scalar_t x) {
// exp(a - x).
scalar_t ax, fac, res, num, numfac;
static scalar_t MAXLOG = std::is_same<scalar_t,double>::value ?
static scalar_t MAXLOG = std::is_same_v<scalar_t,double> ?
7.09782712893383996843E2 : 88.72283905206835;
static scalar_t EXP1 = 2.718281828459045;
static scalar_t lanczos_g = 6.024680040776729583740234375;
@ -655,7 +655,7 @@ static scalar_t _igam_helper_fac(scalar_t a, scalar_t x) {
template <typename scalar_t>
static scalar_t _igam_helper_series(scalar_t a, scalar_t x) {
// Compute igam using DLMF 8.11.4. [igam1]
static scalar_t MACHEP = std::is_same<scalar_t, double>::value ?
static scalar_t MACHEP = std::is_same_v<scalar_t, double> ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
static int MAXITER = 2000;
@ -693,7 +693,7 @@ static scalar_t _igamc_helper_series(scalar_t a, scalar_t x) {
scalar_t sum = 0;
scalar_t term, logx;
static scalar_t MAXITER = 2000;
static scalar_t MACHEP = std::is_same<scalar_t, double>::value ?
static scalar_t MACHEP = std::is_same_v<scalar_t, double> ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
for (n = 1; n < MAXITER; n++) {
@ -942,7 +942,7 @@ static scalar_t _igam_helper_asymptotic_series(scalar_t a, scalar_t x, bool igam
int k, n, sgn;
int maxpow = 0;
static scalar_t MACHEP = std::is_same<scalar_t, double>::value ?
static scalar_t MACHEP = std::is_same_v<scalar_t, double> ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
scalar_t lambda = x / a;
scalar_t sigma = (x - a) / a;
@ -1007,11 +1007,11 @@ static scalar_t _igamc_helper_continued_fraction(scalar_t a, scalar_t x) {
scalar_t ans, ax, c, yc, r, t, y, z;
scalar_t pk, pkm1, pkm2, qk, qkm1, qkm2;
int MAXITER = 2000;
static scalar_t MACHEP = std::is_same<scalar_t, double>::value ?
static scalar_t MACHEP = std::is_same_v<scalar_t, double> ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
static scalar_t BIG = std::is_same<scalar_t,double>::value ?
static scalar_t BIG = std::is_same_v<scalar_t,double> ?
4.503599627370496e15 : 16777216.;
static scalar_t BIGINV = std::is_same<scalar_t,double>::value ?
static scalar_t BIGINV = std::is_same_v<scalar_t,double> ?
2.22044604925031308085e-16 : 5.9604644775390625E-8;
ax = _igam_helper_fac(a, x);


@ -477,17 +477,17 @@ inline void compute_source_index_and_lambda(
// It will not be used by data types other than BFloat16 and Half.
template <typename scalar_in, typename scalar_out,
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_out> || !std::is_same<scalar_in, float>::value, int> = 0>
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_out> || !std::is_same_v<scalar_in, float>, int> = 0>
void inline apply_grad_input(scalar_in* buffer_ptr, scalar_out* gin, int64_t size) {
TORCH_CHECK((is_reduced_floating_point_v<scalar_out>),
"Upsample backward only support BFloat16 and Half in the lower precision data types on CPU.")
TORCH_CHECK((std::is_same<scalar_in, float>::value),
TORCH_CHECK((std::is_same_v<scalar_in, float>),
"Upsample backward should use float as acc buffer for BFloat16 and Half grad input on CPU.")
return;
}
template <typename scalar_in, typename scalar_out,
typename std::enable_if_t<is_reduced_floating_point_v<scalar_out> && std::is_same<scalar_in, float>::value, int> = 0>
typename std::enable_if_t<is_reduced_floating_point_v<scalar_out> && std::is_same_v<scalar_in, float>, int> = 0>
void inline apply_grad_input(scalar_in* buffer_ptr, scalar_out* gin, int64_t size) {
using bVec = Vectorized<scalar_out>;
using fVec = Vectorized<float>;


@ -129,7 +129,7 @@ static void upsample_bicubic2d_backward_out_frame(
at::parallel_for(0, channels, at::internal::GRAIN_SIZE / output_slice_size / 4, [&](int64_t start, int64_t end) {
opmath_t* acc_data_ptr = nullptr;
std::unique_ptr<opmath_t[]> buffer_data;
if constexpr (!std::is_same<scalar_t, opmath_t>::value) {
if constexpr (!std::is_same_v<scalar_t, opmath_t>) {
buffer_data = std::make_unique<opmath_t[]>(input_slice_size);
acc_data_ptr = buffer_data.get();
memset(acc_data_ptr, 0, sizeof(opmath_t) * input_slice_size);


@ -60,7 +60,7 @@ void bernoulli_scalar_kernel(const TensorBase &self, double p, std::optional<Gen
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::Half,
self.scalar_type(), "bernoulli_scalar_cpu_", [&] {
at::Tensor tmp_int_tensor;
if (std::is_same<scalar_t, int>::value && contig) {
if (std::is_same_v<scalar_t, int> && contig) {
tmp_int_tensor = self;
} else {
tmp_int_tensor = at::empty(self.sizes(), self.options().dtype(at::kInt));
@ -81,7 +81,7 @@ void bernoulli_scalar_kernel(const TensorBase &self, double p, std::optional<Gen
// vectorized copy if using buffer and contiguous, i.e., being non-int
// type and contiguous
if (!std::is_same<scalar_t, int>::value && contig) {
if (!std::is_same_v<scalar_t, int> && contig) {
scalar_t *self_seg = self_ptr + begin;
int* tmp_seg = sample_int_ptr + begin;
at::vec::convert<int, scalar_t>(tmp_seg, self_seg, len);
@ -129,17 +129,17 @@ void exponential_kernel(TensorIteratorBase &iter, double lambda, std::optional<G
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "exponential_cpu", [&] {
at::Tensor tmp_tensor;
constexpr bool is_df = std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value;
constexpr bool is_df = std::is_same_v<scalar_t, float> || std::is_same_v<scalar_t, double>;
if (is_df && contig) {
tmp_tensor = self;
} else if (std::is_same<scalar_t, double>::value) {
} else if (std::is_same_v<scalar_t, double>) {
tmp_tensor = at::empty(self.sizes(), self.options().dtype(at::kDouble));
} else {
tmp_tensor = at::empty(self.sizes(), self.options().dtype(at::kFloat));
}
scalar_t *self_ptr = self.data_ptr<scalar_t>();
using tmp_scalar_t = typename std::conditional_t<std::is_same<scalar_t, double>::value, double, float>;
using tmp_scalar_t = typename std::conditional_t<std::is_same_v<scalar_t, double>, double, float>;
tmp_scalar_t *sample_ptr = tmp_tensor.data_ptr<tmp_scalar_t>();
// Intel MKL vRngExponential variate originally does not exclude 0.
@ -159,7 +159,7 @@ void exponential_kernel(TensorIteratorBase &iter, double lambda, std::optional<G
int64_t len = end - begin;
if (len > 0) {
VSLStreamStatePtr stream;
if constexpr (std::is_same<scalar_t, double>::value) {
if constexpr (std::is_same_v<scalar_t, double>) {
vslNewStream(&stream, VSL_BRNG_MCG31, seed);
vslSkipAheadStream(stream, begin);
vdRngExponential(VSL_RNG_METHOD_EXPONENTIAL_ICDF, stream, len,

View File

@ -16,7 +16,7 @@ namespace {
template <typename scalar_t>
void fill_non_native_type(TensorIterator& iter, const Scalar& value_scalar) {
auto value = value_scalar.to<scalar_t>().x;
using H = typename std::make_signed<decltype(value)>::type; // Signed type has more acceleration
using H = typename std::make_signed_t<decltype(value)>; // Signed type has more acceleration
// Preserve the representation of value. static_cast<H>(value) is implementation defined.
H val = *reinterpret_cast<H*>(std::addressof(value));
cpu_kernel_vec</*check_dynamic_cast=*/false>(

View File

@ -388,7 +388,7 @@ void cpu_masked_select_serial_kernel(TensorIterator& iter, const func_t& f) {
char* mask = data[2];
for (const auto i : c10::irange(n)) {
mask_t mask_value = *(mask_t*)(mask + strides[2] * i);
if constexpr (!std::is_same<mask_t, bool>::value) {
if constexpr (!std::is_same_v<mask_t, bool>) {
TORCH_CHECK(mask_value == 0 || mask_value == 1, "Mask tensor can take 0 and 1 values only");
}
if (mask_value) {
@ -426,7 +426,7 @@ void cpu_masked_select_kernel(TensorIterator& iter, const func_t& f) {
char* mask_prefix_sum = data[3];
for (const auto i : c10::irange(n)) {
mask_t mask_value = *(mask_t*)(mask + strides[2] * i);
if constexpr (!std::is_same<mask_t, bool>::value) {
if constexpr (!std::is_same_v<mask_t, bool>) {
TORCH_CHECK(mask_value == 0 || mask_value == 1, "Mask tensor can take 0 and 1 values only");
}
if (mask_value) {

View File

@ -172,7 +172,7 @@ multiple_outputs_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_
using traits = function_traits<func_t>;
using result_type = typename traits::result_type;
constexpr int num_outputs = std::tuple_size<result_type>::value;
constexpr int num_outputs = std::tuple_size_v<result_type>;
constexpr int ntensors = traits::arity + num_outputs;
// Copying strides to temporary array helps auto vectorization in older GCC

View File

@ -19,7 +19,7 @@ namespace {
template <typename scalar_t>
bool is_nan(scalar_t v) {
if (std::is_integral<scalar_t>::value || std::is_same<scalar_t, unsigned char>::value) {
if (std::is_integral_v<scalar_t> || std::is_same_v<scalar_t, unsigned char>) {
return false;
}
return std::isnan(v);
@ -429,7 +429,7 @@ void cpu_max_pool_channels_last(
// temp buffer holding max value with opmath_t
std::unique_ptr<opmath_t []> max_arr;
opmath_t* max_ptr = nullptr;
if (!std::is_same<scalar_t, opmath_t>::value) {
if (!std::is_same_v<scalar_t, opmath_t>) {
max_arr = std::make_unique<opmath_t[]>(size);
max_ptr = max_arr.get();
}

View File

@ -45,7 +45,7 @@ static void arange_kernel(TensorIterator& iter, const Scalar& scalar_start, cons
static void linspace_kernel(TensorIterator& iter, const Scalar& scalar_start, const Scalar& scalar_end, int64_t steps) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.dtype(), "linspace_cpu", [&]() {
// step should be of double type for all integral types
using step_t = std::conditional_t<std::is_integral<scalar_t>::value, double, scalar_t>;
using step_t = std::conditional_t<std::is_integral_v<scalar_t>, double, scalar_t>;
const scalar_t start = scalar_start.to<scalar_t>();
const scalar_t end = scalar_end.to<scalar_t>();
// Cast `end` and `start` to `step_t`, since range can be larger than scalar_t for integral types

View File

@ -114,7 +114,7 @@ inline void vectorized_outer_reduction(char** data, int64_t inner_stride, int64_
template<typename traits, typename res_t>
static void set_result(const int index, const res_t result, const TensorIteratorBase &iter, const int num_outputs) {
// static_assert(std::is_same<res_t, typename traits::arg2_t>::value, "data types must match");
// static_assert(std::is_same_v<res_t, typename traits::arg2_t>, "data types must match");
if (index < num_outputs) {
char *out = (char *) iter.data_ptr(index);
*(res_t *) out = result;
@ -202,7 +202,7 @@ void binary_kernel_reduce(TensorIteratorBase& iter, ops_t ops, init_t init) {
typename c_traits::result_type>::value,
"all accumulate types must match");
static_assert(
std::is_default_constructible<acc_t>::value,
std::is_default_constructible_v<acc_t>,
"the accumulate type must be default-constructible"
);
const int num_outputs = iter.noutputs();
@ -229,7 +229,7 @@ void binary_kernel_reduce(TensorIteratorBase& iter, ops_t ops, init_t init) {
int max_threads = at::get_num_threads();
AT_ASSERT(max_threads > 0);
static_assert(
!std::is_same<acc_t, bool>::value,
!std::is_same_v<acc_t, bool>,
"Concurrently modifying different references into std::vector<bool> is UB."
);
std::vector<acc_t> buffer((unsigned)max_threads, init);

View File

@ -1237,7 +1237,7 @@ ApplyInputGradientsChannelsLastRowMov(
template <typename T, typename PT, typename opmath_t>
inline typename std::
enable_if<std::is_same<T, opmath_t>::value, std::tuple<opmath_t, opmath_t>>::type
enable_if<std::is_same_v<T, opmath_t>, std::tuple<opmath_t, opmath_t>>::type
CalcInternalGradientsChannelsLast(
const T* X_data,
const T* dY_data,
@ -1292,7 +1292,7 @@ inline typename std::
template <typename T, typename PT, typename opmath_t>
inline typename std::
enable_if<!std::is_same<T, opmath_t>::value, std::tuple<opmath_t, opmath_t>>::type
enable_if<!std::is_same_v<T, opmath_t>, std::tuple<opmath_t, opmath_t>>::type
CalcInternalGradientsChannelsLast(
const T* X_data,
const T* dY_data,

View File

@ -296,7 +296,7 @@ void layer_norm_backward_frame(
}
template <typename T, typename T2, typename opmath_t,
typename std::enable_if_t<is_reduced_floating_point_v<T> && std::is_same<T2, float>::value, int> = 0>
typename std::enable_if_t<is_reduced_floating_point_v<T> && std::is_same_v<T2, float>, int> = 0>
void layer_norm_backward_frame(
const T* dY_data,
const T* X_data,

View File

@ -212,8 +212,8 @@ template <typename input_scalar_t, typename stat_scalar_t, typename stat_accscal
__global__ void batch_norm_transform_input_kernel(
const GenericPackedTensorAccessor<const input_scalar_t, 3, RestrictPtrTraits, index_t> input,
GenericPackedTensorAccessor<input_scalar_t, 3, RestrictPtrTraits, index_t> output,
const GenericPackedTensorAccessor<typename std::conditional<train, stat_accscalar_t, stat_scalar_t>::type, 1, RestrictPtrTraits, index_t> mean_,
const GenericPackedTensorAccessor<typename std::conditional<train, stat_accscalar_t, stat_scalar_t>::type, 1, RestrictPtrTraits, index_t> var_or_invstd,
const GenericPackedTensorAccessor<typename std::conditional_t<train, stat_accscalar_t, stat_scalar_t>, 1, RestrictPtrTraits, index_t> mean_,
const GenericPackedTensorAccessor<typename std::conditional_t<train, stat_accscalar_t, stat_scalar_t>, 1, RestrictPtrTraits, index_t> var_or_invstd,
const GenericPackedTensorAccessor<const stat_scalar_t, 1, RestrictPtrTraits, index_t> weight,
const GenericPackedTensorAccessor<const stat_scalar_t, 1, RestrictPtrTraits, index_t> bias,
stat_accscalar_t epsilon) {
@ -582,7 +582,7 @@ __global__ void batch_norm_backward_elemt_kernel(
template <typename scalar_t, int64_t dim, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
static GenericPackedTensorAccessor<scalar_t, dim, PtrTraits, index_t> get_packed_accessor(
const Tensor& t, c10::string_view var_name) {
constexpr auto expect_type = c10::CppTypeToScalarType<typename std::remove_const<scalar_t>::type>::value;
constexpr auto expect_type = c10::CppTypeToScalarType<typename std::remove_const_t<scalar_t>>::value;
const auto actual_type = t.scalar_type();
TORCH_CHECK(actual_type == expect_type, "Expected ", var_name,
" to have type ", expect_type, " but got ", actual_type);

View File

@ -187,7 +187,7 @@ struct ConvolutionParams
};
// ConvolutionParams must be a POD because we read out its memory
// contents as char* when hashing
static_assert(std::is_standard_layout<ConvolutionParams>::value, "ConvolutionParams not POD");
static_assert(std::is_standard_layout_v<ConvolutionParams>, "ConvolutionParams not POD");
void setConvolutionParams(
ConvolutionParams* params, miopenHandle_t handle,

View File

@ -803,7 +803,7 @@ std::tuple<Tensor, Tensor> unpack_hidden(const std::tuple<Tensor, Tensor>& hidde
template<typename hidden_type>
hidden_type pack_hidden(const Tensor& hx, const Tensor& cx) {
static_assert(std::is_same<hidden_type, void>::value, "pack_hidden not implemented for this type");
static_assert(std::is_same_v<hidden_type, void>, "pack_hidden not implemented for this type");
TORCH_CHECK(false, "NOT IMPLEMENTED");
}

View File

@ -33,7 +33,7 @@ std::vector<size_t> get_mem_format_aware_shape(const at::Tensor& in) {
template <typename PT>
void q8_copy_int8_weight_and_add_offset(const at::Tensor& in, at::Tensor& out) {
using T = typename PT::underlying;
static constexpr auto offset = std::is_same<T, uint8_t>::value ? 128 : 0;
static constexpr auto offset = std::is_same_v<T, uint8_t> ? 128 : 0;
TORCH_CHECK(
in.scalar_type() == c10::kQInt8,
"q8_copy_int8_weight_and_add_offset: Expected input weight data type ",

View File

@ -654,7 +654,7 @@ at::Tensor PackedConvWeightsQnnp<kSpatialDim>::apply_impl_xnnp(
if (!per_channel()) {
w_zp = static_cast<underlying_t>(
weight_contig.q_zero_point() +
(std::is_same<underlying_t, uint8_t>::value ? 128 : 0));
(std::is_same_v<underlying_t, uint8_t> ? 128 : 0));
weight_tensor = at::native::empty_affine_quantized(
weight_contig.sizes(),

View File

@ -491,7 +491,7 @@ at::Tensor PackedLinearWeightsQnnp::apply_impl_xnnp(
// prepare weights
underlying_t w_zp = static_cast<underlying_t>(
orig_weight.q_zero_point() +
(std::is_same<underlying_t, uint8_t>::value ? 128 : 0));
(std::is_same_v<underlying_t, uint8_t> ? 128 : 0));
at::Tensor xnnp_weight = at::_empty_affine_quantized(
orig_weight.sizes(),

View File

@ -754,7 +754,7 @@ void _apply_sparse_csr_linear_solve(
scalar_t* values_ptr = values.data_ptr<scalar_t>();
scalar_t* b_ptr = b.data_ptr<scalar_t>();
scalar_t* x_ptr = x.data_ptr<scalar_t>();
auto CUDA_R_TYP = std::is_same<scalar_t, double>::value ? CUDA_R_64F : CUDA_R_32F;
auto CUDA_R_TYP = std::is_same_v<scalar_t, double> ? CUDA_R_64F : CUDA_R_32F;
TORCH_CUDSS_CHECK(cudssMatrixCreateDn(&b_mt, b.size(0), 1, b.size(0), b_ptr, CUDA_R_TYP, CUDSS_LAYOUT_COL_MAJOR));
TORCH_CUDSS_CHECK(cudssMatrixCreateDn(&x_mt, x.size(0), 1, x.size(0), x_ptr, CUDA_R_TYP, CUDSS_LAYOUT_COL_MAJOR));
TORCH_CUDSS_CHECK(cudssMatrixCreateCsr(&A_mt, A.size(0), A.size(1), A._nnz(), rowOffsets, rowOffsets + crow.size(0), colIndices, values_ptr, CUDA_R_32I, CUDA_R_TYP, CUDSS_MTYPE_GENERAL, CUDSS_MVIEW_FULL, CUDSS_BASE_ZERO));

View File

@ -207,10 +207,10 @@ struct CusparseMatrixMultiplyOp {
CusparseMatrixMultiplyOp() {
static_assert(
std::is_same<c10::Half, scalar_t>::value ||
std::is_same<c10::BFloat16, scalar_t>::value ||
std::is_same<float, scalar_t>::value ||
std::is_same<double, scalar_t>::value ||
std::is_same_v<c10::Half, scalar_t> ||
std::is_same_v<c10::BFloat16, scalar_t> ||
std::is_same_v<float, scalar_t> ||
std::is_same_v<double, scalar_t> ||
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"cusparseSpGEMM only supports data type of half, bfloat16, float, double and complex float, double.");
@ -669,10 +669,10 @@ void sparse_sparse_matmul_cuda_kernel(
const Tensor& mat2) {
static_assert(
std::is_same<c10::Half, scalar_t>::value ||
std::is_same<c10::BFloat16, scalar_t>::value ||
std::is_same<float, scalar_t>::value ||
std::is_same<double, scalar_t>::value ||
std::is_same_v<c10::Half, scalar_t> ||
std::is_same_v<c10::BFloat16, scalar_t> ||
std::is_same_v<float, scalar_t> ||
std::is_same_v<double, scalar_t> ||
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"sparse_sparse_matmul_cuda_kernel only supports data type of half, bfloat16, float, double and complex float, double.");

View File

@ -211,8 +211,8 @@ void spgemm_cutlass(
AlphaArguments alpha_arguments{
[&]() -> AlphaArguments {
if constexpr (std::is_same<ElementComputeEpilogue, cutlass::half_t>::value ||
std::is_same<ElementComputeEpilogue, cutlass::bfloat16_t>::value) {
if constexpr (std::is_same_v<ElementComputeEpilogue, cutlass::half_t> ||
std::is_same_v<ElementComputeEpilogue, cutlass::bfloat16_t>) {
return {ElementComputeEpilogue{alpha.to<float>()}};
} else {
return {alpha.to<ElementComputeEpilogue>()};
@ -221,8 +221,8 @@ void spgemm_cutlass(
};
BetaArguments beta_arguments{
[&]() -> BetaArguments {
if constexpr (std::is_same<ElementComputeEpilogue, cutlass::half_t>::value ||
std::is_same<ElementComputeEpilogue, cutlass::bfloat16_t>::value) {
if constexpr (std::is_same_v<ElementComputeEpilogue, cutlass::half_t> ||
std::is_same_v<ElementComputeEpilogue, cutlass::bfloat16_t>) {
return {ElementComputeEpilogue{beta.to<float>()}};
} else {
return {beta.to<ElementComputeEpilogue>()};

View File

@ -55,7 +55,7 @@ struct Dropout {
// We're exploiting the fact that floating point comparison is equivalent to integer
// comparison, since we're comparing unsigned integers whose top 8-bits are zero.
if (!encode_dropout_in_sign_bit
&& (std::is_same<T, cutlass::half_t>::value || std::is_same<T, cutlass::bfloat16_t>::value)) {
&& (std::is_same_v<T, cutlass::half_t> || std::is_same_v<T, cutlass::bfloat16_t>)) {
uint16_t rnd_16[16];
#pragma unroll
for (int i = 0; i < 16; i++) { rnd_16[i] = uint16_t(rnd_8[i]); }

View File

@ -884,31 +884,31 @@ template <typename T> void dispatch_cutlassB_f32_sm80(T cb, int cc) {
template <typename DT, typename T>
void dispatch_cutlassB(T cb, int cc = 0) {
if (std::is_same<DT, cutlass::half_t>::value && 70 <= cc && cc < 75) {
if (std::is_same_v<DT, cutlass::half_t> && 70 <= cc && cc < 75) {
dispatch_cutlassB_f16_sm70(cb, cc);
}
if (std::is_same<DT, cutlass::bfloat16_t>::value && 80 <= cc && cc < 100) {
if (std::is_same_v<DT, cutlass::bfloat16_t> && 80 <= cc && cc < 100) {
dispatch_cutlassB_bf16_sm80(cb, cc);
}
if (std::is_same<DT, cutlass::half_t>::value && 80 <= cc && cc < 100) {
if (std::is_same_v<DT, cutlass::half_t> && 80 <= cc && cc < 100) {
dispatch_cutlassB_f16_sm80(cb, cc);
}
if (std::is_same<DT, cutlass::half_t>::value && 50 <= cc && cc < 70) {
if (std::is_same_v<DT, cutlass::half_t> && 50 <= cc && cc < 70) {
dispatch_cutlassB_f16_sm50(cb, cc);
}
if (std::is_same<DT, float>::value && 50 <= cc && cc < 70) {
if (std::is_same_v<DT, float> && 50 <= cc && cc < 70) {
dispatch_cutlassB_f32_sm50(cb, cc);
}
if (std::is_same<DT, float>::value && 70 <= cc && cc < 75) {
if (std::is_same_v<DT, float> && 70 <= cc && cc < 75) {
dispatch_cutlassB_f32_sm70(cb, cc);
}
if (std::is_same<DT, cutlass::half_t>::value && 75 <= cc && cc < 80) {
if (std::is_same_v<DT, cutlass::half_t> && 75 <= cc && cc < 80) {
dispatch_cutlassB_f16_sm75(cb, cc);
}
if (std::is_same<DT, float>::value && 75 <= cc && cc < 80) {
if (std::is_same_v<DT, float> && 75 <= cc && cc < 80) {
dispatch_cutlassB_f32_sm75(cb, cc);
}
if (std::is_same<DT, float>::value && 80 <= cc && cc < 100) {
if (std::is_same_v<DT, float> && 80 <= cc && cc < 100) {
dispatch_cutlassB_f32_sm80(cb, cc);
}
}

View File

@ -283,31 +283,31 @@ template <typename T> void dispatch_cutlassF_f32_sm80(T cb, int cc) {
template <typename DT, typename T>
void dispatch_cutlassF(T cb, int cc = 0) {
if (std::is_same<DT, cutlass::bfloat16_t>::value && 80 <= cc && cc < 100) {
if (std::is_same_v<DT, cutlass::bfloat16_t> && 80 <= cc && cc < 100) {
dispatch_cutlassF_bf16_sm80(cb, cc);
}
if (std::is_same<DT, cutlass::half_t>::value && 50 <= cc && cc < 70) {
if (std::is_same_v<DT, cutlass::half_t> && 50 <= cc && cc < 70) {
dispatch_cutlassF_f16_sm50(cb, cc);
}
if (std::is_same<DT, cutlass::half_t>::value && 70 <= cc && cc < 75) {
if (std::is_same_v<DT, cutlass::half_t> && 70 <= cc && cc < 75) {
dispatch_cutlassF_f16_sm70(cb, cc);
}
if (std::is_same<DT, cutlass::half_t>::value && 75 <= cc && cc < 80) {
if (std::is_same_v<DT, cutlass::half_t> && 75 <= cc && cc < 80) {
dispatch_cutlassF_f16_sm75(cb, cc);
}
if (std::is_same<DT, cutlass::half_t>::value && 80 <= cc && cc < 100) {
if (std::is_same_v<DT, cutlass::half_t> && 80 <= cc && cc < 100) {
dispatch_cutlassF_f16_sm80(cb, cc);
}
if (std::is_same<DT, float>::value && 50 <= cc && cc < 70) {
if (std::is_same_v<DT, float> && 50 <= cc && cc < 70) {
dispatch_cutlassF_f32_sm50(cb, cc);
}
if (std::is_same<DT, float>::value && 70 <= cc && cc < 75) {
if (std::is_same_v<DT, float> && 70 <= cc && cc < 75) {
dispatch_cutlassF_f32_sm70(cb, cc);
}
if (std::is_same<DT, float>::value && 75 <= cc && cc < 80) {
if (std::is_same_v<DT, float> && 75 <= cc && cc < 80) {
dispatch_cutlassF_f32_sm75(cb, cc);
}
if (std::is_same<DT, float>::value && 80 <= cc && cc < 100) {
if (std::is_same_v<DT, float> && 80 <= cc && cc < 100) {
dispatch_cutlassF_f32_sm80(cb, cc);
}
}

View File

@ -352,7 +352,7 @@ def write_decl_impl(
declarations += f" {_call}"
declarations += "}\n\n"
dispatch_all += f"""
if (std::is_same<DT, {DTYPES[cat_dt]}>::value && {cat_sm} <= cc && cc < {cat_sm_max}) {{
if (std::is_same_v<DT, {DTYPES[cat_dt]}> && {cat_sm} <= cc && cc < {cat_sm_max}) {{
{dispatch_category_fn}(cb, cc);
}}"""

View File

@ -197,7 +197,7 @@ inline constexpr To safe_downcast(const From& v) {
template <typename To, typename From>
inline constexpr bool is_signed_to_unsigned() {
return std::is_signed<From>::value && std::is_unsigned<To>::value;
return std::is_signed_v<From> && std::is_unsigned_v<To>;
}
} // namespace detail

View File

@ -780,13 +780,13 @@ public:
};
template <typename T>
typename std::enable_if_t<!is_complex<T>::value&& std::is_unsigned<T>::value, T>
typename std::enable_if_t<!is_complex<T>::value&& std::is_unsigned_v<T>, T>
correctEpsilon(const T& eps)
{
return eps;
}
template <typename T>
typename std::enable_if_t<!is_complex<T>::value && !std::is_unsigned<T>::value, T>
typename std::enable_if_t<!is_complex<T>::value && !std::is_unsigned_v<T>, T>
correctEpsilon(const T& eps)
{
return std::abs(eps);

View File

@ -11,7 +11,7 @@ class DeviceArray {
public:
DeviceArray(c10::Allocator& allocator, size_t size)
: data_ptr_(allocator.allocate(size * sizeof(T))) {
static_assert(std::is_trivial<T>::value, "T must be a trivial type");
static_assert(std::is_trivial_v<T>, "T must be a trivial type");
TORCH_INTERNAL_ASSERT(
0 == (reinterpret_cast<intptr_t>(data_ptr_.get()) % alignof(T)),
"c10::DeviceArray: Allocated memory is not aligned for this data type");

View File

@ -2322,7 +2322,7 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
// Check it here statically - otherwise TypeMeta would throw the runtime
// error in an attempt to invoke TypeMeta::ctor()
static_assert(
std::is_default_constructible<T>::value,
std::is_default_constructible_v<T>,
"Tensor can't hold non-default-constructable types");
return static_cast<T*>(raw_mutable_data(caffe2::TypeMeta::Make<T>()));
}

View File

@ -37,17 +37,13 @@ using testutils::expectThrows;
using testutils::string_equal;
namespace test_typedefs {
static_assert(std::is_same<char, string_view::value_type>::value, "");
static_assert(std::is_same<char*, string_view::pointer>::value, "");
static_assert(std::is_same<const char*, string_view::const_pointer>::value, "");
static_assert(std::is_same<char&, string_view::reference>::value, "");
static_assert(
std::is_same<const char&, string_view::const_reference>::value,
"");
static_assert(std::is_same<std::size_t, string_view::size_type>::value, "");
static_assert(
std::is_same<std::ptrdiff_t, string_view::difference_type>::value,
"");
static_assert(std::is_same_v<char, string_view::value_type>, "");
static_assert(std::is_same_v<char*, string_view::pointer>, "");
static_assert(std::is_same_v<const char*, string_view::const_pointer>, "");
static_assert(std::is_same_v<char&, string_view::reference>, "");
static_assert(std::is_same_v<const char&, string_view::const_reference>, "");
static_assert(std::is_same_v<std::size_t, string_view::size_type>, "");
static_assert(std::is_same_v<std::ptrdiff_t, string_view::difference_type>, "");
} // namespace test_typedefs
namespace test_default_constructor {

View File

@ -114,7 +114,7 @@ class ArrayRef final {
/* implicit */ ArrayRef(const std::vector<T, A>& Vec)
: Data(Vec.data()), Length(Vec.size()) {
static_assert(
!std::is_same<T, bool>::value,
!std::is_same_v<T, bool>,
"ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.");
}

View File

@ -842,7 +842,7 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T> {
// If we just moved the element we're inserting, be sure to update
// the reference (never happens if TakesParamByValue).
static_assert(
!TakesParamByValue || std::is_same<ArgType, T>::value,
!TakesParamByValue || std::is_same_v<ArgType, T>,
"ArgType must be 'T' when taking by value!");
if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
++EltPtr;

View File

@ -232,7 +232,7 @@ class intrusive_ptr final {
// the target class T to be fully defined when intrusive_ptr<T> is instantiated
// this is a problem for classes that contain pointers to themselves
// static_assert(
// std::is_base_of<intrusive_ptr_target, TTarget>::value,
// std::is_base_of_v<intrusive_ptr_target, TTarget>,
// "intrusive_ptr can only be used for classes that inherit from
// intrusive_ptr_target.");
#ifndef _WIN32
@ -354,7 +354,7 @@ class intrusive_ptr final {
: target_(
detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. intrusive_ptr move constructor got pointer of wrong type.");
rhs.target_ = FromNullType::singleton();
}
@ -368,7 +368,7 @@ class intrusive_ptr final {
: target_(
detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. intrusive_ptr copy constructor got pointer of wrong type.");
retain_();
}
@ -385,7 +385,7 @@ class intrusive_ptr final {
template <class From, class FromNullType>
intrusive_ptr& operator=(intrusive_ptr<From, FromNullType>&& rhs) & noexcept {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. intrusive_ptr move assignment got pointer of wrong type.");
intrusive_ptr tmp = std::move(rhs);
swap(tmp);
@ -404,7 +404,7 @@ class intrusive_ptr final {
intrusive_ptr& operator=(
const intrusive_ptr<From, NullType>& rhs) & noexcept {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. intrusive_ptr copy assignment got pointer of wrong type.");
intrusive_ptr tmp = rhs;
swap(tmp);
@ -743,7 +743,7 @@ class weak_intrusive_ptr final {
: target_(
detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. weak_intrusive_ptr move constructor got pointer of wrong type.");
rhs.target_ = FromNullType::singleton();
}
@ -758,7 +758,7 @@ class weak_intrusive_ptr final {
: target_(
detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. weak_intrusive_ptr copy constructor got pointer of wrong type.");
retain_();
}
@ -776,7 +776,7 @@ class weak_intrusive_ptr final {
weak_intrusive_ptr& operator=(
weak_intrusive_ptr<From, FromNullType>&& rhs) & noexcept {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. weak_intrusive_ptr move assignment got pointer of wrong type.");
weak_intrusive_ptr tmp = std::move(rhs);
swap(tmp);
@ -802,7 +802,7 @@ class weak_intrusive_ptr final {
weak_intrusive_ptr& operator=(
const weak_intrusive_ptr<From, NullType>& rhs) & noexcept {
static_assert(
std::is_convertible<From*, TTarget*>::value,
std::is_convertible_v<From*, TTarget*>,
"Type mismatch. weak_intrusive_ptr copy assignment got pointer of wrong type.");
weak_intrusive_ptr tmp = rhs;
swap(tmp);

View File

@ -46,7 +46,7 @@ namespace strong
namespace impl
{
template <typename T, typename ... V>
using WhenConstructible = std::enable_if_t<std::is_constructible<T, V...>::value>;
using WhenConstructible = std::enable_if_t<std::is_constructible_v<T, V...>>;
}
template <typename M, typename T>
@ -101,18 +101,18 @@ public:
{
}
template <typename ... U,
typename = std::enable_if_t<std::is_constructible<T, U&&...>::value && (sizeof...(U) > 0)>>
typename = std::enable_if_t<std::is_constructible_v<T, U&&...> && (sizeof...(U) > 0)>>
constexpr
explicit
type(
U&& ... u)
noexcept(std::is_nothrow_constructible<T, U...>::value)
noexcept(std::is_nothrow_constructible_v<T, U...>)
: val(std::forward<U>(u)...)
{}
friend constexpr void swap(type& a, type& b) noexcept(
std::is_nothrow_move_constructible<T>::value &&
std::is_nothrow_move_assignable<T>::value
std::is_nothrow_move_constructible_v<T> &&
std::is_nothrow_move_assignable_v<T>
)
{
using std::swap;
@ -820,7 +820,7 @@ class affine_point<D>::modifier<::strong::type<T, Tag, M...>>
using base_diff_type = decltype(std::declval<const T&>() - std::declval<const T&>());
public:
using difference = std::conditional_t<std::is_same<D, void>{}, strong::type<base_diff_type, Tag, strong::difference>, D>;
static_assert(std::is_constructible<difference, base_diff_type>::value, "");
static_assert(std::is_constructible_v<difference, base_diff_type>, "");
[[nodiscard]]
friend
constexpr

View File

@ -129,7 +129,7 @@ static bool EmbeddingLookupGenericSlowIdx(
const float* scale_bias, \
bool normalize_by_lengths, \
OutType* out) { \
if constexpr (std::is_same<InType, uint8_t>::value) { \
if constexpr (std::is_same_v<InType, uint8_t>) { \
CAFFE_ENFORCE(scale_bias != nullptr, "scale_bias must not be nullptr"); \
} else { \
CAFFE_ENFORCE(scale_bias == nullptr, "scale_bias must be nullptr"); \

View File

@ -68,8 +68,7 @@ template <typename Module, typename ExpectedType, typename... Args>
void assert_has_expected_type() {
using ReturnType =
typename torch::detail::return_type_of_forward<Module, Args...>::type;
constexpr bool is_expected_type =
std::is_same<ReturnType, ExpectedType>::value;
constexpr bool is_expected_type = std::is_same_v<ReturnType, ExpectedType>;
ASSERT_TRUE(is_expected_type) << Module().name();
}

View File

@ -2395,7 +2395,7 @@ class CppPythonBindingsCodeCache(CppCodeCache):
static void* (*_torchinductor_pyobject_tensor_data_ptr)(PyObject* obj);
template <typename T> static inline T parse_arg(PyObject* args, size_t n) {
static_assert(std::is_pointer<T>::value, "arg type must be pointer or long");
static_assert(std::is_pointer_v<T>, "arg type must be pointer or long");
return static_cast<T>(_torchinductor_pyobject_tensor_data_ptr(PyTuple_GET_ITEM(args, n)));
}
template <> inline int64_t parse_arg<int64_t>(PyObject* args, size_t n) {

View File

@ -537,7 +537,7 @@ Welford<scalar_t> welford_vec_reduce_all(Welford<at::vec::VectorizedN<scalar_t,
#endif
template <typename T, typename U> inline typename std::common_type<T, U>::type mod(T a, U b) { return a % b; }
template <typename T, typename U> inline typename std::common_type_t<T, U> mod(T a, U b) { return a % b; }
template <> inline float mod(float a, float b) { return std::fmod(a, b); }
template <> inline double mod(double a, double b) { return std::fmod(a, b); }

View File

@ -205,7 +205,7 @@ template <typename ModuleType>
AnyModule::AnyModule(std::shared_ptr<ModuleType> module)
: content_(make_holder(
std::move(module),
&std::remove_reference<ModuleType>::type::forward)) {
&std::remove_reference_t<ModuleType>::forward)) {
// `AnyModule` can only store an `nn::Module` subclass object that provides
// a `forward()` method that has a non-templatized return type.
// (e.g. `AnyModule` cannot store `nn::Sequential`, because `nn::Sequential`'s

View File

@ -39,7 +39,7 @@ class NamedAnyModule {
/// Creates a `NamedAnyModule` from a `Module`, moving or copying it
/// into a `shared_ptr` internally.
// NOTE: We need to use `std::remove_reference<M>::type` to get rid of
// NOTE: We need to use `std::remove_reference_t<M>` to get rid of
// any reference components for make_unique.
template <typename M, typename = torch::detail::enable_if_module_t<M>>
NamedAnyModule(std::string name, M&& module)

View File

@ -61,7 +61,7 @@ class MiniArrayRef final {
/* implicit */ MiniArrayRef(const std::vector<T, A>& Vec)
: Data(Vec.data()), Length(Vec.size()) {
static_assert(
!std::is_same<T, bool>::value,
!std::is_same_v<T, bool>,
"MiniArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.");
}

View File

@ -89,7 +89,7 @@ std::function<void(Stack&)> getExecuteFunc() {
template <class TBackendInterface>
class backend {
static_assert(
std::is_base_of<PyTorchBackendInterface, TBackendInterface>::value,
std::is_base_of_v<PyTorchBackendInterface, TBackendInterface>,
"torch::jit::backend<T> requires T to inherit from PyTorchBackendInterface");
std::string backend_name_;

View File

@ -64,7 +64,7 @@ struct ArgumentInfo {
};
static_assert(
std::is_standard_layout<ArgumentInfo>::value,
std::is_standard_layout_v<ArgumentInfo>,
"ArgumentInfo is to be a POD struct");
static_assert(
sizeof(ArgumentInfo) == sizeof(ArgumentInfo::plain_data_type),
@ -106,7 +106,7 @@ struct ArgumentSpec {
at::Device device = t->device();
arg.dev_type_ =
// NOLINTNEXTLINE(bugprone-signed-char-misuse)
static_cast<std::underlying_type<DeviceType>::type>(device.type());
static_cast<std::underlying_type_t<DeviceType>>(device.type());
// NOLINTNEXTLINE(bugprone-signed-char-misuse)
arg.device_ = device.index();
arg.type_ = static_cast<unsigned>(t->scalar_type());
@ -266,8 +266,8 @@ struct CompleteArgumentSpec {
pod.type = static_cast<int>(t.scalar_type());
at::Device device = t.device();
// NOLINTNEXTLINE(bugprone-signed-char-misuse)
pod.dev_type = static_cast<std::underlying_type<DeviceType>::type>(
device.type());
pod.dev_type =
static_cast<std::underlying_type_t<DeviceType>>(device.type());
// NOLINTNEXTLINE(bugprone-signed-char-misuse)
pod.device = device.index();
pod.requires_grad = with_grad && t.requires_grad();

View File

@ -1078,7 +1078,7 @@ template <typename T>
std::enable_if_t<std::is_integral_v<T>, llvm::Value*> getFromType(
llvm::Type* type,
T value) {
return llvm::ConstantInt::get(type, value, std::is_signed<T>::value);
return llvm::ConstantInt::get(type, value, std::is_signed_v<T>);
}
template <typename T>

View File

@ -297,7 +297,7 @@ static void registerXpuDeviceProperties(PyObject* module) {
break;
default:
stream << "unknown device type:"
<< static_cast<typename std::underlying_type<device_type>::type>(
<< static_cast<typename std::underlying_type_t<device_type>>(
prop.device_type);
break;
}

View File

@ -691,7 +691,7 @@ inline {sig.decl()} {{
if has_symint:
result += f"""
namespace symint {{
template <typename T, typename = std::enable_if_t<std::is_same<T, {intlike_t}>::value>>
template <typename T, typename = std::enable_if_t<std::is_same_v<T, {intlike_t}>>>
{sig.decl(suppress_symint_suffix=True)} {{
return at::_ops::{f.func.name.unambiguous_name()}::call({exprs_str});
}}