Enable more readability-redundant checks (#143963)
They help simplify code.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/143963
Approved by: https://github.com/albanD
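For context, the readability-redundant* family flags code that restates what the compiler already does. Below is a minimal sketch of the two patterns that dominate the hunks in this commit, redundant member initialization and redundant string initialization; `Widget` is an illustrative name, not code from the tree:

#include <string>

// Flagged: the mem-initializer str_() and the = "" initializer merely
// restate default construction (readability-redundant-member-init and
// readability-redundant-string-init, respectively).
struct Widget {
  Widget() : str_(), count_(0) {}
  std::string str_;
  std::string label_ = "";
  int count_;
};

// After applying the checks' fix-its: identical behavior, less noise.
struct WidgetFixed {
  WidgetFixed() : count_(0) {}
  std::string str_;
  std::string label_;
  int count_;
};

Dropping the initializers changes nothing at runtime, since std::string default-constructs to empty; the same reasoning applies to the empty-base and empty-member initializers removed throughout the diff below.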
@@ -55,10 +55,12 @@ readability-container-size-empty,
 readability-delete-null-pointer,
 readability-duplicate-include,
 readability-misplaced-array-index,
-readability-redundant-function-ptr-dereference,
-readability-redundant-smartptr-get,
+readability-redundant*,
 readability-simplify-subscript-expr,
 readability-string-compare,
+-readability-redundant-access-specifiers,
+-readability-redundant-control-flow,
+-readability-redundant-declaration,
 '
 HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
 WarningsAsErrors: '*'
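In the `Checks` string, each comma-separated entry is a glob and a leading `-` disables whatever it matches, so the single `readability-redundant*` entry enables every readability-redundant-* check at once (subsuming the two spelled-out entries it replaces), while the three `-readability-redundant-...` entries opt back out of specific checks. To see which concrete checks the glob enables, clang-tidy's standard CLI can expand it: `clang-tidy -checks='readability-redundant*' --list-checks`.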
@@ -11,7 +11,7 @@ C10_EXPORT c10::intrusive_ptr<c10::StorageImpl> new_shm_fd_storage(
       ALLOCATOR_MAPPED_KEEPFD | ALLOCATOR_MAPPED_UNLINK;
   std::string handle = NewProcessWideShmHandle();
   auto sptr = MapAllocator::makeDataPtr(
-      handle.c_str(), flags, size * sizeof(uint8_t), nullptr);
+      handle, flags, size * sizeof(uint8_t), nullptr);
   return c10::make_intrusive<StorageImpl>(
       c10::StorageImpl::use_byte_size_t(),
       size,
@@ -87,7 +87,7 @@ bool APIVitals::setVital(
   return true;
 }
 
-APIVitals::APIVitals() : vitals_enabled(false), name_map_() {
+APIVitals::APIVitals() : vitals_enabled(false) {
   // Set default values, force is necessary because in unit tests the env
   // variable may not be set when global APIVitals are constructed.
   setVital("CUDA", "used", "False", /* force = */ true);
@@ -11,7 +11,7 @@ TORCH_API bool torchVitalEnabled();
 
 struct TORCH_API TorchVitalAttr {
   // always initialized to empty
-  std::string value = "";
+  std::string value;
   template <typename T>
   TorchVitalAttr& operator<<(const T& t) {
     if (torchVitalEnabled()) {
@@ -22,7 +22,7 @@ class TORCH_API Blob final : public c10::intrusive_ptr_target {
   /**
    * Initializes an empty Blob.
    */
-  Blob() noexcept : meta_() {}
+  Blob() noexcept = default;
   ~Blob() override {
     Reset();
   }
@@ -76,7 +76,7 @@ std::string ClassType::getForwardPreHookErrorMessage(size_t pre_hook_idx) const
   std::string input_types = getSchemaInputTypesString(forward_schema);
   const std::vector<Argument>& forward_args = forward_schema.arguments();
 
-  std::string single_output = "";
+  std::string single_output;
   if (forward_args.size() == 2 &&
       forward_args[1].type()->cast<TupleType>() == nullptr) {
     // if the output type is a single tuple, it needs to be wrapped in an outer tuple
@@ -432,7 +432,7 @@ struct TORCH_API ClassType : public NamedType {
   bool isModule_ = false;
 
   // Doc string of class.
-  std::string doc_string_ = "";
+  std::string doc_string_;
 
   // For error reporting accesses to class level attributes.
   std::vector<std::string> unresolved_class_attributes_;
@@ -43,7 +43,7 @@ struct TORCH_API Function {
   Function(Function&&) noexcept = default;
   Function& operator=(Function&&) noexcept = default;
   virtual std::string_view doc_string() const {
-    static constexpr std::string_view no_doc_string = "";
+    static constexpr std::string_view no_doc_string;
     return no_doc_string;
   }
 
@@ -567,7 +567,7 @@ inline std::ostream& operator<<(std::ostream& out, const Argument& arg) {
   if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()){
     out << arg.alias_info()->containedTypes()[0];
   }
-  std::string N = "";
+  std::string N;
   if (arg.N()) {
     N = std::to_string(*arg.N());
   }
@@ -2245,7 +2245,7 @@ static const TypeKind Kind = TypeKind::ScalarTypeType;
   static ScalarTypeTypePtr get();
 
  private:
-  ScalarTypeType() : EnumerationType() {}
+  ScalarTypeType() {}
 };
 
 struct MemoryFormatType;
@@ -2259,7 +2259,7 @@ static const TypeKind Kind = TypeKind::MemoryFormatType;
   static MemoryFormatTypePtr get();
 
  private:
-  MemoryFormatType() : EnumerationType() {}
+  MemoryFormatType() {}
 };
 
 struct LayoutType;
@@ -2273,7 +2273,7 @@ static const TypeKind Kind = TypeKind::LayoutType;
   static LayoutTypePtr get();
 
  private:
-  LayoutType() : EnumerationType() {}
+  LayoutType() {}
 };
 
 namespace detail {
@@ -48,7 +48,6 @@ CppFunction::CppFunction(c10::KernelFunction func, std::optional<c10::impl::CppS
     : func_(std::move(func))
     , cpp_signature_(cpp_signature)
     , schema_(std::move(schema))
-    , debug_()
     {}
 
 CppFunction::~CppFunction() = default;
@@ -31,7 +31,7 @@ class C10_API DataPtr {
  public:
   // Choice of CPU here is arbitrary; if there's an "undefined" device
   // we could use that too
-  DataPtr() : ptr_(), device_(DeviceType::CPU) {}
+  DataPtr() : device_(DeviceType::CPU) {}
   DataPtr(void* data, Device device) : ptr_(data), device_(device) {}
   DataPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter, Device device)
       : ptr_(data, ctx, ctx_deleter), device_(device) {}
@@ -18,8 +18,7 @@ CUDAAllocatorConfig::CUDAAllocatorConfig()
       m_expandable_segments(false),
       m_release_lock_on_cudamalloc(false),
       m_pinned_use_cuda_host_register(false),
-      m_pinned_use_background_threads(false),
-      m_last_allocator_settings("") {
+      m_pinned_use_background_threads(false) {
   m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0);
 }
 
@@ -211,7 +211,6 @@ struct Block {
       void* ptr)
       : device(device),
         stream(stream),
-        stream_uses(),
         size(size),
         requested_size(0),
         pool(pool),
@@ -219,11 +218,7 @@ struct Block {
 
   // constructor for search key
   Block(c10::DeviceIndex device, cudaStream_t stream, size_t size)
-      : device(device),
-        stream(stream),
-        stream_uses(),
-        size(size),
-        requested_size(0) {}
+      : device(device), stream(stream), size(size), requested_size(0) {}
 
   size_t gc_count() {
     TORCH_INTERNAL_ASSERT(pool);
@@ -74,7 +74,7 @@ struct CUDAGuard {
 /// CUDAGuard for when you can use this.
 struct OptionalCUDAGuard {
   /// Create an uninitialized OptionalCUDAGuard.
-  explicit OptionalCUDAGuard() : guard_() {}
+  explicit OptionalCUDAGuard() = default;
 
   /// Set the current CUDA device to the passed Device, if it is not nullopt.
   explicit OptionalCUDAGuard(std::optional<Device> device_opt)
@@ -208,7 +208,7 @@ struct CUDAStreamGuard {
 /// CUDAGuard for when you can use this.
 struct OptionalCUDAStreamGuard {
   /// Create an uninitialized guard.
-  explicit OptionalCUDAStreamGuard() : guard_() {}
+  explicit OptionalCUDAStreamGuard() = default;
 
   /// Set the current CUDA device to the device associated with the passed
   /// stream, and set the current CUDA stream on that device to the passed
@@ -49,8 +49,8 @@ TEST(StringUtilTest, testStrWideSingleMultibyte) {
 
 namespace test_str_wide_empty {
 TEST(StringUtilTest, testStrWideEmpty) {
-  std::wstring s = L"";
-  std::string narrow = "";
+  std::wstring s;
+  std::string narrow;
   EXPECT_EQ(narrow, c10::str(s));
 
   const wchar_t* c_str = s.c_str();
@@ -57,8 +57,7 @@ class LeftRight final {
       : _counters{{{0}, {0}}},
        _foregroundCounterIndex(0),
        _foregroundDataIndex(0),
-       _data{{T{args...}, T{args...}}},
-       _writeMutex() {}
+       _data{{T{args...}, T{args...}}} {}
 
   // Copying and moving would not be threadsafe.
   // Needs more thought and careful design to make that work.
@@ -112,8 +112,6 @@ FatalSignalHandler::FatalSignalHandler()
     : fatalSignalHandlersInstalled(false),
       fatalSignalReceived(false),
       fatalSignalName("<UNKNOWN>"),
-      writingCond(),
-      writingMutex(),
      signalReceived(false) {}
 
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
@@ -150,7 +150,6 @@ CudaIPCSentData::CudaIPCSentData(
     : handle_(std::move(handle)),
       offset_(offset),
       counter_ptr_(counter_ptr),
-      original_ptr_(),
      device_(device) {
 #if !defined(USE_ROCM)
   // CUDA have the unofficial limit on the number of recorded blocking
@@ -126,8 +126,7 @@ struct TensorDataContainer {
   AT_FORALL_COMPLEX_TYPES(TENSOR)
 #undef TENSOR
   TensorDataContainer(std::initializer_list<TensorDataContainer> init_list)
-      : sizes_(),
-        scalar_type_(init_list.begin()->scalar_type()),
+      : scalar_type_(init_list.begin()->scalar_type()),
        type_(TensorDataContainerType::InitList),
        init_list_(init_list) {
     const TensorDataContainer& first_elem = *(init_list.begin());
@@ -376,7 +376,7 @@ class Sequential : public torch::nn::ModuleHolder<SequentialImpl> {
  public:
   using torch::nn::ModuleHolder<SequentialImpl>::ModuleHolder;
 
-  Sequential() : ModuleHolder() {}
+  Sequential() {}
 
   /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
   /// It enables the following use case:
@@ -766,7 +766,7 @@ edge_list collect_next_edges(Variables&&... variables) {
 }
 
 struct TypeAndSize {
-  TypeAndSize() : options(at::TensorOptions()) {}
+  TypeAndSize() {}
   /* implicit */
   TypeAndSize(const at::Tensor& t)
       : sym_sizes(t.sym_sizes().vec()), options(t.options()) {}
@@ -65,8 +65,7 @@ CopySlices::CopySlices(
     at::TensorGeometry view_,
     std::unique_ptr<ViewFunc> view_fn_,
     std::shared_ptr<Node> fn_)
-    : Node(),
-      base(base_var),
+    : base(base_var),
      view(std::move(view_)),
      view_fn(std::move(view_fn_)),
      fn(std::move(fn_)) {
@@ -603,8 +603,7 @@ static PyTypeObject TraceContextType = {
 
 class gil_and_restore_thread {
  public:
-  gil_and_restore_thread()
-      : gil_(), initial_thread_state_{PyThreadState_Get()} {}
+  gil_and_restore_thread() : initial_thread_state_{PyThreadState_Get()} {}
   ~gil_and_restore_thread() {
     PyThreadState_Swap(initial_thread_state_);
@@ -108,8 +108,7 @@ void DistEngine::globalCpuThread(
 }
 
 DistEngine::DistEngine()
-    : initializedContextIds_(),
-      engine_(Engine::get_default_engine()),
+    : engine_(Engine::get_default_engine()),
      global_cpu_ready_queue_(std::make_shared<ReadyQueue>()),
      global_cpu_thread_(
          &DistEngine::globalCpuThread,
@@ -282,8 +282,7 @@ off_t refresh(
 } // namespace
 
 FileStore::FileStore(std::string path, int numWorkers)
-    : Store(),
-      path_(std::move(path)),
+    : path_(std::move(path)),
      numWorkers_(numWorkers),
      cleanupKey_("cleanup/"),
@@ -147,7 +147,7 @@ uint64_t Work::getSequencenumber() const {
 class FutureWrappingWork : public Work {
  public:
   FutureWrappingWork(c10::intrusive_ptr<c10::ivalue::Future> fut)
-      : Work(), _fut(std::move(fut)) {}
+      : _fut(std::move(fut)) {}
 
   ~FutureWrappingWork() override = default;
 
@@ -53,10 +53,7 @@ RRefForkData::RRefForkData(
 ////////////////////////////// RRef /////////////////////////////////////
 
 RRef::RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type)
-    : RRefInterface(),
-      ownerId_(ownerId),
-      rrefId_(rrefId),
-      type_(std::move(type)) {}
+    : ownerId_(ownerId), rrefId_(rrefId), type_(std::move(type)) {}
 
 RRefForkData RRef::fork() const {
   auto& ctx = RRefContext::getInstance();
@@ -3109,7 +3109,7 @@ class TORCH_FUNCTION_MODE_STACK : public LeafGuard {
   TORCH_FUNCTION_MODE_STACK(
       const py::list& initial_stack,
       py::object verbose_code_parts)
-      : LeafGuard(std::move(verbose_code_parts)), _ref_stack() {
+      : LeafGuard(std::move(verbose_code_parts)) {
     Py_ssize_t len = PyList_Size(initial_stack.ptr());
     for (Py_ssize_t idx = 0; idx < len; idx++) {
       PyObject* mode = PyList_GetItem(initial_stack.ptr(), idx); // borrowed ref
@@ -21,7 +21,7 @@ struct AOTIKernelMetadata {
   std::vector<ParameterMetadata> parameter_metadata_list_;
   // AOTI model runner to run the AOTI kernel
   std::shared_ptr<AOTIModelContainerRunner> kernel_runner_;
-  AOTIKernelMetadata() : parameter_metadata_list_(), kernel_runner_(nullptr) {}
+  AOTIKernelMetadata() : kernel_runner_(nullptr) {}
 
   // Check whether the given parameter metadata list is the same as the
   // parameter metadata list of the AOTI kernel.
@@ -91,7 +91,7 @@ std::tuple<std::string, std::string> get_cpp_compile_command(
   std::string compiler = compile_options["compiler"].get<std::string>();
   bool compile_only = compile_options["compile_only"].get<bool>();
 
-  std::string source_args = "";
+  std::string source_args;
   for (const std::string& source : sources) {
     source_args += source + " ";
   }
@@ -99,37 +99,37 @@ std::tuple<std::string, std::string> get_cpp_compile_command(
   std::string file_ext = compile_only ? ".o" : ".so";
   std::string target_file = output_dir + filename + file_ext;
 
-  std::string cflags_args = "";
+  std::string cflags_args;
   for (auto& arg : compile_options["cflags"]) {
     cflags_args += "-" + arg.get<std::string>() + " ";
   }
 
-  std::string definitions_args = "";
+  std::string definitions_args;
   for (auto& arg : compile_options["definitions"]) {
     definitions_args += "-D " + arg.get<std::string>() + " ";
   }
 
-  std::string include_dirs_args = "";
+  std::string include_dirs_args;
   for (auto& arg : compile_options["include_dirs"]) {
     include_dirs_args += "-I" + arg.get<std::string>() + " ";
   }
 
-  std::string ldflags_args = "";
+  std::string ldflags_args;
   for (auto& arg : compile_options["ldflags"]) {
     ldflags_args += "-" + arg.get<std::string>() + " ";
   }
 
-  std::string libraries_dirs_args = "";
+  std::string libraries_dirs_args;
   for (auto& arg : compile_options["libraries_dirs"]) {
     libraries_dirs_args += "-L" + arg.get<std::string>() + " ";
   }
 
-  std::string libraries_args = "";
+  std::string libraries_args;
   for (auto& arg : compile_options["libraries"]) {
     libraries_args += "-l" + arg.get<std::string>() + " ";
   }
 
-  std::string passthrough_parameters_args = "";
+  std::string passthrough_parameters_args;
   for (auto& arg : compile_options["passthrough_args"]) {
     passthrough_parameters_args += arg.get<std::string>() + " ";
   }
@@ -343,10 +343,10 @@ AOTIModelPackageLoader::AOTIModelPackageLoader(
   }
 
   temp_dir_ = create_temp_dir();
-  std::string so_filename = "";
-  std::string cpp_filename = "";
-  std::string consts_filename = "";
-  std::string found_filenames = ""; // Saving for bookkeeping
+  std::string so_filename;
+  std::string cpp_filename;
+  std::string consts_filename;
+  std::string found_filenames; // Saving for bookkeeping
   std::string model_directory =
       "data" + k_separator + "aotinductor" + k_separator + model_name;
 
@@ -379,7 +379,7 @@ AOTIModelPackageLoader::AOTIModelPackageLoader(
         "Failed to find parent path in " + output_path_str);
   }
   std::string parent_path = output_path_str.substr(0, parent_path_idx);
-  if (!recursive_mkdir(parent_path.c_str())) {
+  if (!recursive_mkdir(parent_path)) {
     throw std::runtime_error(fmt::format(
         "Failed to create directory {}: {}",
         parent_path,
@@ -29,7 +29,7 @@ class TORCH_API hash_t : public c10::uint128 {
   hash_t(uint64_t val) : uint128(val) {}
   hash_t(uint128 val) : uint128(val) {}
   hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {}
-  hash_t() : uint128() {}
+  hash_t() {}
 };
 
 // Std* functions use 64-bit hash
@@ -410,7 +410,7 @@ convertIValue(
   size_t offset = 0;
   size_t numel = 0;
   size_t itemsize = 0;
-  std::string device_str = "";
+  std::string device_str;
   // symbolic sizes/strides implies t->storage_offset() will fail
   if (tensor_impl->has_storage() &&
       !tensor_impl->has_symbolic_sizes_strides()) {
@@ -111,7 +111,7 @@ struct Option {
        is_variadic(is_variadic),
        has_out(has_out) {}
   Option(bool is_variadic, bool has_out)
-      : arguments(), is_variadic(is_variadic), has_out(has_out) {}
+      : is_variadic(is_variadic), has_out(has_out) {}
   Option(const Option&) = delete;
   Option(Option&& other) noexcept = default;
   Option& operator=(const Option&) = delete;
@@ -58,7 +58,7 @@ struct BenchmarkConfig {
   // If set autograd profiler will be enabled. I.e. this variable would be
   // created before the main benchmark loop (but after the warmup):
   // RecordProfile guard(profiler_output_path);
-  std::string profiler_output_path{""};
+  std::string profiler_output_path;
 };
 
 namespace detail {
@@ -119,8 +119,8 @@ class TORCH_API CppFunction final {
       : func_(c10::KernelFunction::makeFromUnboxedRuntimeFunction(f)),
        cpp_signature_(c10::impl::CppSignature::make<Func>()),
        schema_(
-            c10::detail::inferFunctionSchemaFromFunctor<std::decay_t<Func>>()),
-        debug_() {}
+            c10::detail::inferFunctionSchemaFromFunctor<std::decay_t<Func>>())
+        {}
 
   /// This overload accepts compile time function pointers, e.g.,
   /// `CppFunction(TORCH_FN(add_impl))`
@@ -134,8 +134,8 @@ class TORCH_API CppFunction final {
        cpp_signature_(
            c10::impl::CppSignature::make<typename FuncPtr::FuncType>()),
        schema_(c10::detail::inferFunctionSchemaFromFunctor<
-                typename FuncPtr::FuncType>()),
-        debug_() {}
+                typename FuncPtr::FuncType>())
+        {}
 
   /// This overload accepts lambdas, e.g., `CppFunction([](const Tensor& self) {
   /// ... })`
@@ -149,8 +149,8 @@ class TORCH_API CppFunction final {
            std::forward<Lambda>(f))),
        cpp_signature_(c10::impl::CppSignature::make<Lambda>()),
        schema_(c10::detail::inferFunctionSchemaFromFunctor<
-                std::decay_t<Lambda>>()),
-        debug_() {}
+                std::decay_t<Lambda>>())
+        {}
 
 #if defined C10_MOBILE
   /// This overload accepts function pointers, e.g., `CppFunction(&add_impl,