Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[2/N] Fixes clang-tidy warnings in header files (#113727)
This PR fixes more clang-tidy warnings in common headers.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/113727
Approved by: https://github.com/Skylion007
@@ -163,7 +163,7 @@ public:
     static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");

     return std::move(*this).kernel(
-        std::move(dispatch_key),
+        dispatch_key,
         KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
         impl::CppSignature::make<KernelFunctor>(),
         detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
@@ -243,7 +243,7 @@ public:
     static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");

     return std::move(*this).kernel(
-        std::move(dispatch_key),
+        dispatch_key,
         KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
         impl::CppSignature::make<FuncType>(),
         // TODO Do schema inference without relying on WrapFunctionIntoFunctor
@@ -287,7 +287,7 @@ public:
     TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");

     return std::move(*this).kernel(
-        std::move(dispatch_key),
+        dispatch_key,
         KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
         impl::CppSignature::make<FuncType>(),
         // TODO Do schema inference without relying on WrapFunctionIntoFunctor
@@ -343,7 +343,7 @@ public:
     static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");

     return std::move(*this).kernel(
-        std::move(dispatch_key),
+        dispatch_key,
         KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
         impl::CppSignature::make<Lambda>(),
         // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
@@ -403,7 +403,7 @@ public:
     KernelRegistrationConfig config;
     config.dispatch_key = dispatch_key;
     config.func = std::move(func);
-    config.cpp_signature = std::move(cpp_signature);
+    config.cpp_signature = cpp_signature;
     config.inferred_function_schema = std::move(inferred_function_schema);
     kernels.push_back(std::move(config));
     return std::move(*this);
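Note on the kernel-registration hunks above: they drop std::move from dispatch_key and cpp_signature. The likely motivation (an assumption, not stated in the PR text) is clang-tidy's performance-move-const-arg check, which flags std::move applied to small, trivially copyable values because the "move" degenerates into a copy and only obscures the code. A minimal sketch of the pattern, using a hypothetical stand-in enum rather than the real c10::DispatchKey:

#include <utility>

// Hypothetical stand-in for a small, trivially copyable key such as c10::DispatchKey.
enum class Key { CPU, CUDA };

void register_kernel(Key key) {
  (void)key; // placeholder body
}

int main() {
  Key dispatch_key = Key::CPU;
  // Before: std::move on a trivially copyable value still copies; clang-tidy's
  // performance-move-const-arg flags it as misleading noise.
  register_kernel(std::move(dispatch_key));
  // After: pass the value directly.
  register_kernel(dispatch_key);
  return 0;
}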
@@ -97,7 +97,7 @@ struct ObserverContext {
   virtual ~ObserverContext() = default;

  protected:
-  ObserverContext() {}
+  ObserverContext() = default;
 };

 typedef c10::SmallVector<uint64_t, kSoftLimitCallbacks> CallbackHandles;
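Replacing the empty constructor body with = default is the standard fix for clang-tidy's modernize-use-equals-default. For ObserverContext itself (which has a virtual destructor) the change is mostly stylistic, but in general a defaulted constructor can stay trivial while a user-provided empty body never is, as the hypothetical types below illustrate:

#include <type_traits>

struct WithBody {
  WithBody() {} // user-provided: the type is no longer trivially default constructible
};

struct Defaulted {
  Defaulted() = default; // compiler-generated: triviality is preserved
};

static_assert(!std::is_trivially_default_constructible_v<WithBody>);
static_assert(std::is_trivially_default_constructible_v<Defaulted>);

int main() {
  return 0;
}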
@@ -177,7 +177,7 @@ class C10_CUDA_API CUDAStream {
     // Note: this returns the range of priority **supported by PyTorch**, not
     // the range of priority **supported by CUDA**. The former is a subset of
     // the latter.
-    int least_priority, greatest_priority;
+    int least_priority = 0, greatest_priority = 0;
     C10_CUDA_CHECK(
         cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority));
 #ifdef USE_ROCM
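Initializing least_priority and greatest_priority before the call matches clang-tidy's cppcoreguidelines-init-variables advice: even out-parameters that an API is expected to fill should start with a defined value, so nothing reads indeterminate memory if the call fails or is compiled out. A hedged sketch with a stand-in API instead of cudaDeviceGetStreamPriorityRange:

#include <cstdio>

// Stand-in for an out-parameter API such as cudaDeviceGetStreamPriorityRange.
bool get_priority_range(int* lo, int* hi) {
  *lo = -1;
  *hi = 0;
  return true; // imagine this could also fail and leave the outputs untouched
}

int main() {
  // Value-initialize the out-parameters so every path sees defined values.
  int least_priority = 0, greatest_priority = 0;
  if (get_priority_range(&least_priority, &greatest_priority)) {
    std::printf("priority range: [%d, %d]\n", least_priority, greatest_priority);
  }
  return 0;
}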
@@ -210,7 +210,7 @@ class C10_API WarningHandlerGuard {
 /// setWarnAlways(true) to turn it into TORCH_WARN, which can be
 /// tested for more easily.
 C10_API void set_warnAlways(bool) noexcept(true);
-C10_API bool get_warnAlways(void) noexcept(true);
+C10_API bool get_warnAlways() noexcept(true);

 // A RAII guard that sets warn_always (not thread-local) on
 // construction, and sets it back to the original value upon destruction.
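The get_warnAlways change is the classic modernize-redundant-void-arg cleanup: in C++ an empty parameter list already means "takes no arguments", so the C-style (void) spelling adds nothing. A hypothetical declaration pair showing the two spellings of the same function:

// C-style spelling, flagged by clang-tidy's modernize-redundant-void-arg:
bool get_flag(void);

// Equivalent, idiomatic C++ declaration of the same function:
bool get_flag();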
@@ -13,9 +13,8 @@ namespace c10 {

 /// Constructors

-inline C10_HOST_DEVICE Float8_e4m3fn::Float8_e4m3fn(float value) {
-  x = detail::fp8e4m3fn_from_fp32_value(value);
-}
+inline C10_HOST_DEVICE Float8_e4m3fn::Float8_e4m3fn(float value)
+    : x(detail::fp8e4m3fn_from_fp32_value(value)) {}

 /// Implicit conversions

@@ -17,9 +17,8 @@ namespace c10 {

 /// Constructors

-inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value) {
-  x = detail::fp8e5m2_from_fp32_value(value);
-}
+inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value)
+    : x(detail::fp8e5m2_from_fp32_value(value)) {}

 /// Implicit conversions

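Both Float8_e4m3fn and Float8_e5m2 constructors move the conversion into the member initializer list, which is what clang-tidy's cppcoreguidelines-prefer-member-initializer suggests (an assumption about the specific check): initialize the member directly instead of default-initializing it and then assigning in the body. A small sketch with a hypothetical wrapper and conversion helper:

#include <cstdint>

// Hypothetical stand-in for the fp8*_from_fp32_value bit-conversion helpers.
inline uint8_t to_bits(float value) {
  return static_cast<uint8_t>(value); // placeholder, not the real fp8 encoding
}

struct Fp8Before {
  uint8_t x;
  explicit Fp8Before(float value) {
    x = to_bits(value); // member is default-initialized, then assigned in the body
  }
};

struct Fp8After {
  uint8_t x;
  explicit Fp8After(float value) : x(to_bits(value)) {} // initialized directly
};

int main() {
  return Fp8Before(1.0f).x == Fp8After(1.0f).x ? 0 : 1;
}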
@@ -36,7 +36,7 @@ class function_ref;
 template <typename Ret, typename... Params>
 class function_ref<Ret(Params...)> {
   Ret (*callback)(intptr_t callable, Params... params) = nullptr;
-  intptr_t callable;
+  intptr_t callable{};

   template <typename Callable>
   static Ret callback_fn(intptr_t callable, Params... params) {
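Giving callable the in-class initializer {} value-initializes it whenever a constructor does not set it, the usual response to clang-tidy's cppcoreguidelines-pro-type-member-init, and it mirrors the = nullptr already on the callback member. A sketch with a hypothetical handle type:

#include <cstdint>

struct Handle {
  void (*fn)(std::intptr_t) = nullptr; // already had a default member initializer
  std::intptr_t data{};                // {} zero-initializes when no constructor sets it
};

int main() {
  Handle h; // default construction leaves no indeterminate members
  return (h.fn == nullptr && h.data == 0) ? 0 : 1;
}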
@@ -55,7 +55,7 @@ class function_ref<Ret(Params...)> {
           typename std::remove_reference<Callable>::type,
           function_ref>::value>::type* = nullptr,
       typename std::enable_if<std::is_convertible<
-          typename c10::invoke_result_t<Callable, Params...>,
+          typename std::invoke_result_t<Callable, Params...>,
           Ret>::value>::type* = nullptr)
       : callback(callback_fn<typename std::remove_reference<Callable>::type>),
         callable(reinterpret_cast<intptr_t>(&callable)) {}
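Swapping c10::invoke_result_t for std::invoke_result_t replaces what appears to be a pre-C++17 compatibility alias with the standard trait from <type_traits> (an assumption about the alias's purpose). The standard trait names the type a callable returns for a given argument list:

#include <type_traits>

int add(int a, int b) {
  return a + b;
}

// std::invoke_result_t<F, Args...> is the type of invoking F with Args... (C++17).
static_assert(std::is_same_v<std::invoke_result_t<decltype(&add), int, int>, int>);

int main() {
  return 0;
}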
@@ -15,7 +15,7 @@ struct LoadImpl {
 template <>
 struct LoadImpl<bool> {
   C10_HOST_DEVICE static bool apply(const void* src) {
-    static_assert(sizeof(bool) == sizeof(char), "");
+    static_assert(sizeof(bool) == sizeof(char));
     // NOTE: [Loading boolean values]
     // Protect against invalid boolean values by loading as a byte
     // first, then converting to bool (see gh-54789).
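Dropping the empty string from static_assert is a small C++17 modernization: since C++17 the message argument is optional, so an empty "" conveys nothing. Side by side:

// Pre-C++17 style: a message was mandatory, so empty strings crept in.
static_assert(sizeof(bool) == sizeof(char), "");

// C++17 style: the message can simply be omitted.
static_assert(sizeof(bool) == sizeof(char));

int main() {
  return 0;
}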
@@ -674,7 +674,7 @@ PyObject* THCPModule_memorySnapshot(PyObject* _unused, PyObject* noargs) {
   std::vector<py::dict> to_gather_dest;

   auto add_frame_key = [&](const py::dict& d,
-                           const std::shared_ptr<c10::GatheredContext> ctx) {
+                           const std::shared_ptr<c10::GatheredContext>& ctx) {
     if (ctx) {
       auto sc = getFromContext(ctx);
       to_gather_frames.emplace_back(sc);
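Taking the std::shared_ptr lambda parameter by const reference rather than by value avoids an atomic reference-count increment on every call; clang-tidy's performance-unnecessary-value-param flags by-value shared_ptr parameters when the callee only inspects the pointee. A hedged sketch of the two signatures:

#include <cstdio>
#include <memory>

struct Context {
  int id = 0;
};

// By value: copies the shared_ptr, bumping the atomic refcount on each call.
void inspect_by_value(const std::shared_ptr<Context> ctx) {
  if (ctx) {
    std::printf("id=%d\n", ctx->id);
  }
}

// By const reference: no refcount traffic; fine when the callee does not keep ownership.
void inspect_by_ref(const std::shared_ptr<Context>& ctx) {
  if (ctx) {
    std::printf("id=%d\n", ctx->id);
  }
}

int main() {
  auto ctx = std::make_shared<Context>();
  inspect_by_value(ctx);
  inspect_by_ref(ctx);
  return 0;
}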
@@ -1352,13 +1352,13 @@ PyObject* THCPModule_setBenchmarkLimitCuDNN(PyObject* _unused, PyObject* arg) {
       "set_benchmark_limit_cudnn expects an int, "
       "but got %s",
       THPUtils_typename(arg));
-  auto benchmark_limit = static_cast<int>(THPUtils_unpackLong(arg));
 #if defined(USE_ROCM)
   TORCH_WARN_ONCE(
       "cuDNN Benchmark limit is not supported in MIOpen and will have no effect.");
 #endif
 #if AT_CUDNN_ENABLED()
 #if HAS_CUDNN_V8()
+  auto benchmark_limit = static_cast<int>(THPUtils_unpackLong(arg));
   at::globalContext().setBenchmarkLimitCuDNN(benchmark_limit);
 #else
   TORCH_WARN_ONCE(
@@ -23,11 +23,13 @@ struct SourceRange;
 struct TORCH_API StringCordView {
   StringCordView();
   StringCordView(const StringCordView&) = default;
+  StringCordView(StringCordView&&) noexcept = default;
   StringCordView(
       std::vector<c10::string_view> inputs,
       std::vector<std::shared_ptr<std::string>> ownerships);

   StringCordView& operator=(const StringCordView&) = default;
+  StringCordView& operator=(StringCordView&&) noexcept = default;

   size_t size() const {
     return accumulated_sizes_.back();
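Defaulted move operations marked noexcept matter because, for a copyable element type, std::vector only moves (rather than copies) elements during reallocation when the move constructor is known not to throw. A minimal illustration with a hypothetical type, not the real StringCordView:

#include <string>
#include <type_traits>
#include <vector>

struct Piece {
  Piece() = default;
  Piece(const Piece&) = default;
  Piece(Piece&&) noexcept = default;
  Piece& operator=(const Piece&) = default;
  Piece& operator=(Piece&&) noexcept = default;

  std::vector<std::string> chunks;
};

// Because Piece is copyable, the explicit noexcept on the move constructor is
// what lets std::vector<Piece> take the cheaper move path while growing.
static_assert(std::is_nothrow_move_constructible_v<Piece>);

int main() {
  return 0;
}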
@@ -212,7 +214,7 @@ struct TORCH_API Source {
       c10::optional<std::string> filename = c10::nullopt,
       size_t starting_line_no = 0,
       std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
-      : text_view_(str),
+      : text_view_(std::move(str)),
         filename_(std::move(filename)),
         starting_line_no_(starting_line_no),
         gen_ranges_(std::move(gen_ranges)) {
@@ -15,7 +15,7 @@ class TORCH_API SerializationStorageContext {
       delete;
   SerializationStorageContext(const SerializationStorageContext&) = delete;

-  uint64_t getOrAddStorage(c10::Storage storage) {
+  uint64_t getOrAddStorage(const c10::Storage& storage) {
     if (!hasStorage(storage)) {
       uint64_t size = storage_id_map_.size();
       storage_id_map_[storage] = size;
@@ -23,7 +23,7 @@ class TORCH_API SerializationStorageContext {
     return storage_id_map_[storage];
   }

-  bool hasStorage(c10::Storage storage) {
+  bool hasStorage(const c10::Storage& storage) {
     return storage_id_map_.find(storage) != storage_id_map_.end();
   }

@@ -62,9 +62,9 @@ class TORCH_API DeserializationStorageContext {
       const DeserializationStorageContext&) = delete;
   DeserializationStorageContext(const DeserializationStorageContext&) = delete;

-  void addStorage(const std::string& name, c10::Storage storage) {
+  void addStorage(std::string name, c10::Storage storage) {
     TORCH_INTERNAL_ASSERT(!hasStorage(name));
-    name_storage_map_.insert({name, storage});
+    name_storage_map_.emplace(std::move(name), std::move(storage));
   }

   bool hasStorage(const std::string& name) {
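The addStorage change switches to the sink-parameter idiom: take the string by value and move it, together with the storage handle, into the map via emplace, so callers that pass an rvalue pay no extra copy. A sketch of the same pattern with plain standard types instead of c10::Storage:

#include <string>
#include <unordered_map>
#include <utility>

// Hypothetical context mirroring the pattern: sink parameters taken by value
// and moved into the container instead of copied.
class StorageContext {
 public:
  void addStorage(std::string name, std::string storage_blob) {
    name_storage_map_.emplace(std::move(name), std::move(storage_blob));
  }

 private:
  std::unordered_map<std::string, std::string> name_storage_map_;
};

int main() {
  StorageContext ctx;
  std::string key = "weights";
  ctx.addStorage(std::move(key), std::string(1024, 'x')); // both arguments move straight through
  return 0;
}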