Fix header-filter for clang-tidy c10 and apply some fixes to c10 and c10d (#91178)

Fixes the broken header filter from #90699 and applies a few more relevant clang-tidy fixes to c10 and c10d. The header filter pattern was actually broken, and the clang-tidy include pattern was redundant. Also fixes a few bugs in torch/distributed/c10d.
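Most of the C++ changes below repeat a handful of clang-tidy patterns: take sink parameters by value and std::move them into members, replace std::bind with lambdas, default trivial destructors, and initialize locals before use. A minimal sketch of the first two patterns, using hypothetical ExampleStore and retry_syscall names rather than the real c10d classes:

#include <string>
#include <utility>

// Pass-by-value + std::move: the string is moved into the member instead of
// being copied from a const reference (the pattern applied to FileStore,
// PrefixStore, Warning, etc. in this PR).
class ExampleStore {
 public:
  explicit ExampleStore(std::string path) : path_(std::move(path)) {}

 private:
  std::string path_;
};

// Lambda instead of std::bind: a simplified stand-in for the syscall() retry
// wrappers touched in FileStore.cpp; the retry condition here is illustrative.
template <typename F>
auto retry_syscall(F fn) -> decltype(fn()) {
  auto rv = fn();
  while (rv == -1) {
    rv = fn(); // hypothetical retry loop
  }
  return rv;
}

int main() {
  ExampleStore store("/tmp/example"); // moved, not copied
  return static_cast<int>(retry_syscall([] { return 0; }));
}

Both patterns keep behavior the same while satisfying clang-tidy's modernize and performance checks.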

Pull Request resolved: https://github.com/pytorch/pytorch/pull/91178
Approved by: https://github.com/ezyang
Authored by: Aaron Gokaslan
Date: 2022-12-27 07:34:12 +00:00
Committed by: PyTorch MergeBot
parent bb24185ff4
commit 97db9fde69
28 changed files with 67 additions and 60 deletions

View File

@@ -38,7 +38,7 @@ performance-*,
 -performance-noexcept-move-constructor,
 -performance-unnecessary-value-param,
 '
-HeaderFilterRegex: '(c10/(?!test)/|torch/csrc/(?!deploy/interpreter/cpython)).*'
+HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
 AnalyzeTemporaryDtors: false
 WarningsAsErrors: '*'
 CheckOptions:

View File

@@ -211,7 +211,6 @@ command = [
 [[linter]]
 code = 'CLANGTIDY'
 include_patterns = [
-'c10/core/*.cpp',
 'c10/core/**/*.cpp',
 'torch/csrc/fx/**/*.cpp',
 'torch/csrc/generic/**/*.cpp',

View File

@@ -97,8 +97,8 @@ CPUCachingAllocator* GetThreadLocalCachingAllocator() {
 }
 WithCPUCachingAllocatorGuard::WithCPUCachingAllocatorGuard(
-CPUCachingAllocator* allocator) {
-prev_caching_allocator_ptr_ = GetThreadLocalCachingAllocator();
+CPUCachingAllocator* allocator)
+: prev_caching_allocator_ptr_(GetThreadLocalCachingAllocator()) {
 caching_allocator_ptr = allocator;
 }

View File

@@ -7,6 +7,7 @@
 #include <numeric>
 #include <sstream>
 #include <string>
+#include <utility>
 namespace c10 {
@@ -183,11 +184,11 @@ void warn(const Warning& warning) {
 Warning::Warning(
 warning_variant_t type,
 const SourceLocation& source_location,
-const std::string& msg,
+std::string msg,
 const bool verbatim)
 : type_(type),
 source_location_(source_location),
-msg_(msg),
+msg_(std::move(msg)),
 verbatim_(verbatim) {}
 Warning::Warning(
@@ -195,7 +196,7 @@ Warning::Warning(
 SourceLocation source_location,
 detail::CompileTimeEmptyString msg,
 const bool verbatim)
-: Warning(type, std::move(source_location), "", verbatim) {}
+: Warning(type, source_location, "", verbatim) {}
 Warning::Warning(
 warning_variant_t type,
@@ -203,7 +204,7 @@ Warning::Warning(
 const char* msg,
 const bool verbatim)
 : type_(type),
-source_location_(std::move(source_location)),
+source_location_(source_location),
 msg_(std::string(msg)),
 verbatim_(verbatim) {}

View File

@@ -123,7 +123,7 @@ class C10_API Warning {
 Warning(
 warning_variant_t type,
 const SourceLocation& source_location,
-const std::string& msg,
+std::string msg,
 bool verbatim);
 Warning(

View File

@@ -126,7 +126,7 @@ void SmallVectorBase<Size_T>::grow_pod(
 size_t MinSize,
 size_t TSize) {
 size_t NewCapacity = getNewCapacity<Size_T>(MinSize, TSize, this->capacity());
-void* NewElts;
+void* NewElts = nullptr;
 if (BeginX == FirstEl) {
 NewElts = std::malloc(NewCapacity * TSize);
 if (NewElts == nullptr) {

View File

@@ -129,7 +129,7 @@ std::ostream& operator<<(std::ostream& o, const uint128& b) {
 // Select a divisor which is the largest power of the base < 2^64.
 uint128 div;
-std::streamsize div_base_log;
+std::streamsize div_base_log = 0;
 switch (flags & std::ios::basefield) {
 case std::ios::hex:
 div = (uint64_t)0x1000000000000000u; // 16^15

View File

@@ -49,7 +49,7 @@ int GetNUMANode(const void* ptr) {
 TORCH_CHECK(
 get_mempolicy(
 &numa_node,
-NULL,
+nullptr,
 0,
 const_cast<void*>(ptr),
 MPOL_F_NODE | MPOL_F_ADDR) == 0,

View File

@@ -57,7 +57,7 @@ void hookupHandler() {
 if (hookedUpCount++) {
 return;
 }
-struct sigaction sa;
+struct sigaction sa {};
 // Setup the handler
 sa.sa_handler = &handleSignal;
 // Restart the system call, if at all possible
@@ -78,7 +78,7 @@ void unhookHandler() {
 if (--hookedUpCount > 0) {
 return;
 }
-struct sigaction sa;
+struct sigaction sa {};
 // Setup the sighub handler
 sa.sa_handler = SIG_DFL;
 // Restart the system call, if at all possible
@@ -106,7 +106,7 @@ FatalSignalHandler& FatalSignalHandler::getInstance() {
 return *handler;
 }
-FatalSignalHandler::~FatalSignalHandler() {}
+FatalSignalHandler::~FatalSignalHandler() = default;
 FatalSignalHandler::FatalSignalHandler()
 : fatalSignalHandlersInstalled(false),
@@ -205,7 +205,7 @@ void FatalSignalHandler::fatalSignalHandler(int signum) {
 if (procDir) {
 pid_t pid = getpid();
 pid_t currentTid = syscall(SYS_gettid);
-struct dirent* entry;
+struct dirent* entry = nullptr;
 pthread_mutex_lock(&writingMutex);
 while ((entry = readdir(procDir)) != nullptr) {
 if (entry->d_name[0] == '.') {
@@ -263,7 +263,7 @@ void FatalSignalHandler::installFatalSignalHandlers() {
 return;
 }
 fatalSignalHandlersInstalled = true;
-struct sigaction sa;
+struct sigaction sa {};
 sigemptyset(&sa.sa_mask);
 // Since we'll be in an exiting situation it's possible there's memory
 // corruption, so make our own stack just in case.

View File

@@ -78,7 +78,7 @@ class TORCH_API FatalSignalHandler {
 bool fatalSignalHandlersInstalled;
 // We need to hold a reference to call the previous SIGUSR2 handler in case
 // we didn't signal it
-struct sigaction previousSigusr2;
+struct sigaction previousSigusr2 {};
 // Flag dictating whether the SIGUSR2 handler falls back to previous handlers
 // or is intercepted in order to print a stack trace.
 std::atomic<bool> fatalSignalReceived;

View File

@@ -9,7 +9,7 @@ Backend::Backend(int rank, int size)
 C10_LOG_API_USAGE_ONCE("c10d.backend");
 }
-Backend::~Backend() {}
+Backend::~Backend() = default;
 void Backend::init() {
 C10_LOG_API_USAGE_ONCE(fmt::format("c10d.backend_{}", getBackendName()));

View File

@@ -1,9 +1,9 @@
 #include <torch/csrc/distributed/c10d/FileStore.hpp>
-#include <assert.h>
 #include <fcntl.h>
-#include <stdint.h>
 #include <sys/stat.h>
+#include <cassert>
+#include <cstdint>
 #ifdef _WIN32
 #include <c10/util/win32-headers.h>
@@ -22,6 +22,7 @@
 #include <sstream>
 #include <system_error>
 #include <thread>
+#include <utility>
 #include <c10/util/Exception.h>
@@ -125,7 +126,7 @@ class Lock {
 #ifdef _WIN32
 auto rv = syscall(std::bind(::flock_, fd_, operation));
 #else
-auto rv = syscall(std::bind(::flock, fd_, operation));
+auto rv = syscall([this, operation] { return ::flock(fd_, operation); });
 #endif
 SYSASSERT(rv, "flock");
 }
@@ -143,7 +144,9 @@ class File {
 fd_ = syscall(std::bind(
 ::open, path.c_str(), flags | _O_BINARY, _S_IREAD | _S_IWRITE));
 #else
-fd_ = syscall(std::bind(::open, path.c_str(), flags, 0644));
+fd_ = syscall([capture0 = path.c_str(), flags] {
+return ::open(capture0, flags, 0644);
+});
 #endif
 // Only retry when the file doesn't exist, since we are waiting for the
 // file to be created in this case to address the following issue:
@@ -174,13 +177,14 @@
 }
 off_t seek(off_t offset, int whence) {
-auto rv = syscall(std::bind(lseek, fd_, offset, whence));
+auto rv =
+syscall([this, offset, whence] { return lseek(fd_, offset, whence); });
 SYSASSERT(rv, "lseek");
 return rv;
 }
 off_t tell() {
-auto rv = syscall(std::bind(lseek, fd_, 0, SEEK_CUR));
+auto rv = syscall([this] { return lseek(fd_, 0, SEEK_CUR); });
 SYSASSERT(rv, "lseek");
 return rv;
 }
@@ -194,7 +198,8 @@
 void write(const void* buf, size_t count) {
 while (count > 0) {
-auto rv = syscall(std::bind(::write, fd_, buf, count));
+auto rv =
+syscall([this, buf, count] { return ::write(fd_, buf, count); });
 SYSASSERT(rv, "write");
 buf = (uint8_t*)buf + rv;
 count -= rv;
@@ -203,7 +208,7 @@
 void read(void* buf, size_t count) {
 while (count > 0) {
-auto rv = syscall(std::bind(::read, fd_, buf, count));
+auto rv = syscall([this, buf, count] { return ::read(fd_, buf, count); });
 SYSASSERT(rv, "read");
 buf = (uint8_t*)buf + rv;
 count -= rv;
@@ -225,7 +230,7 @@
 }
 void read(std::string& str) {
-uint32_t len;
+uint32_t len = 0;
 read(&len, sizeof(len));
 std::vector<uint8_t> buf(len);
 read(buf.data(), len);
@@ -233,7 +238,7 @@
 }
 void read(std::vector<uint8_t>& data) {
-uint32_t len;
+uint32_t len = 0;
 read(&len, sizeof(len));
 data.resize(len);
 read(data.data(), len);
@@ -270,9 +275,9 @@ off_t refresh(
 } // namespace
-FileStore::FileStore(const std::string& path, int numWorkers)
+FileStore::FileStore(std::string path, int numWorkers)
 : Store(),
-path_(path),
+path_(std::move(path)),
 pos_(0),
 numWorkers_(numWorkers),
 cleanupKey_("cleanup/"),

View File

@@ -11,7 +11,7 @@ namespace c10d {
 class TORCH_API FileStore : public Store {
 public:
-explicit FileStore(const std::string& path, int numWorkers);
+explicit FileStore(std::string path, int numWorkers);
 virtual ~FileStore();

View File

@@ -2,7 +2,7 @@
 #ifdef USE_C10D_GLOO
-#include <stdlib.h>
+#include <cstdlib>
 #include <c10/util/Exception.h>

View File

@@ -1,8 +1,8 @@
 #include <torch/csrc/distributed/c10d/HashStore.hpp>
-#include <errno.h>
-#include <stdint.h>
 #include <unistd.h>
+#include <cerrno>
+#include <cstdint>
 #include <chrono>
 #include <cstdio>

View File

@@ -1,11 +1,10 @@
 #include <torch/csrc/distributed/c10d/PrefixStore.hpp>
+#include <utility>
 namespace c10d {
-PrefixStore::PrefixStore(
-const std::string& prefix,
-c10::intrusive_ptr<Store> store)
-: prefix_(prefix), store_(store) {}
+PrefixStore::PrefixStore(std::string prefix, c10::intrusive_ptr<Store> store)
+: prefix_(std::move(prefix)), store_(std::move(store)) {}
 std::string PrefixStore::joinKey(const std::string& key) {
 return prefix_ + "/" + key;

View File

@@ -8,7 +8,7 @@ namespace c10d {
 class TORCH_API PrefixStore : public Store {
 public:
 explicit PrefixStore(
-const std::string& prefix,
+std::string prefix,
 c10::intrusive_ptr<Store> store);
 virtual ~PrefixStore(){};

View File

@@ -147,7 +147,7 @@ ProcessGroup::ProcessGroup(
 ProcessGroup::ProcessGroup(int rank, int size)
 : rank_(rank), size_(size), backendType_(BackendType::UNDEFINED) {}
-ProcessGroup::~ProcessGroup() {}
+ProcessGroup::~ProcessGroup() = default;
 void ProcessGroup::init() {
 C10_LOG_API_USAGE_ONCE(

View File

@@ -626,16 +626,16 @@ void socketInitialize() {
 // gracefully fall back to an alternative if it doesn't.
 bool doesHostnameResolveToUsableAddress(const std::string& hostname) {
 socketInitialize();
-struct addrinfo hints;
+struct addrinfo hints {};
 memset(&hints, 0, sizeof(hints));
 hints.ai_family = AF_UNSPEC;
 hints.ai_socktype = SOCK_STREAM;
-struct addrinfo* result;
+struct addrinfo* result = nullptr;
 auto rv = getaddrinfo(hostname.c_str(), nullptr, &hints, &result);
 if (rv < 0) {
 return false;
 }
-struct addrinfo* rp;
+struct addrinfo* rp = nullptr;
 for (rp = result; rp != nullptr; rp = rp->ai_next) {
 auto fd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol);
 if (fd == -1) {

View File

@@ -18,7 +18,7 @@ ProcessGroupRoundRobin::ProcessGroupRoundRobin(
 iterator_ = processGroups_.begin();
 }
-ProcessGroupRoundRobin::~ProcessGroupRoundRobin() {}
+ProcessGroupRoundRobin::~ProcessGroupRoundRobin() = default;
 c10::intrusive_ptr<Work> ProcessGroupRoundRobin::broadcast(
 std::vector<at::Tensor>& tensors,

View File

@@ -13,6 +13,7 @@
 #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
 #include <torch/csrc/distributed/c10d/ProcessGroupGloo.hpp>
 #include <stdexcept>
+#include <utility>
 namespace c10d {
@@ -23,7 +24,7 @@ struct CollectiveFingerPrint {
 // Current collective's operation type.
 OpType op_type_;
 // Number of input tensors
-std::size_t num_tensors_;
+std::size_t num_tensors_{};
 // input tensor data types
 std::vector<int8_t> tensor_dtypes_;
 // input tensor device types
@@ -52,9 +53,9 @@
 std::vector<int8_t> tensor_device_types,
 std::vector<std::vector<int64_t>> tensor_sizes)
 : op_type_(op_type),
-tensor_dtypes_(tensor_dtypes),
-tensor_device_types_(tensor_device_types),
-tensor_sizes_(tensor_sizes) {}
+tensor_dtypes_(std::move(tensor_dtypes)),
+tensor_device_types_(std::move(tensor_device_types)),
+tensor_sizes_(std::move(tensor_sizes)) {}
 // Logs collective information in case of a failure.
 friend std::ostream& operator<<(
@@ -267,7 +268,7 @@ ProcessGroupWrapper::ProcessGroupWrapper(
 c10::intrusive_ptr<Backend> glooBackend)
 : Backend(backend->getRank(), backend->getSize()),
 backend_(backend),
-glooBackend_(glooBackend) {
+glooBackend_(std::move(glooBackend)) {
 // Set the sequence number for the underlying process group.
 backend_->setSequenceNumberForGroup();
 }

View File

@@ -6,7 +6,7 @@ constexpr std::chrono::milliseconds Store::kDefaultTimeout;
 constexpr std::chrono::milliseconds Store::kNoTimeout;
 // Define destructor symbol for abstract base class.
-Store::~Store() {}
+Store::~Store() = default;
 const std::chrono::milliseconds& Store::getTimeout() const noexcept {
 return timeout_;

View File

@@ -434,7 +434,7 @@ void TCPStoreMasterDaemon::deleteHandler(int socket) {
 }
 void TCPStoreMasterDaemon::checkHandler(int socket) const {
-SizeType nargs;
+SizeType nargs = 0;
 tcputil::recvBytes<SizeType>(socket, &nargs, 1);
 std::vector<std::string> keys(nargs);
 for (const auto i : c10::irange(nargs)) {
@@ -449,7 +449,7 @@ void TCPStoreMasterDaemon::checkHandler(int socket) const {
 }
 void TCPStoreMasterDaemon::waitHandler(int socket) {
-SizeType nargs;
+SizeType nargs = 0;
 tcputil::recvBytes<SizeType>(socket, &nargs, 1);
 std::vector<std::string> keys(nargs);
 for (const auto i : c10::irange(nargs)) {
@@ -741,7 +741,7 @@ void TCPStoreWorkerDaemon::run() {
 }
 // if connection is closed gracefully by master, peeked data will return 0
-char data;
+char data = 0;
 int ret = recv(fds[1].fd, &data, 1, MSG_PEEK);
 if (ret == 0) {
 continue;

View File

@@ -1,6 +1,7 @@
 #include <ATen/ThreadLocalState.h>
 #include <torch/csrc/distributed/c10d/Work.hpp>
+#include <utility>
 namespace c10d {
@@ -129,9 +130,9 @@ void Work::finishAndThrow(std::exception_ptr exception) {
 class FutureWrappingWork : public Work {
 public:
 FutureWrappingWork(c10::intrusive_ptr<c10::ivalue::Future> fut)
-: Work(), _fut(fut) {}
+: Work(), _fut(std::move(fut)) {}
-~FutureWrappingWork() {}
+~FutureWrappingWork() override = default;
 bool isCompleted() override {
 return _fut->completed();

View File

@@ -258,7 +258,7 @@ static PyMethodDef reduceopmeta_methods[] = {
 (PyCFunction)reduceopmeta___instancecheck__,
 METH_O,
 "Custom `__instancecheck__` for ReduceOp"},
-{NULL, NULL}};
+{nullptr, nullptr}};
 PyTypeObject* GetReduceOpMetaclass() {
 static auto* metaclass = [] {
 PyTypeObject* base_metaclass =

View File

@@ -49,8 +49,8 @@ std::ostream& operator<<(std::ostream& output, const Logger& logger) {
 return output << loggerInfo;
 }
-Logger::Logger(std::shared_ptr<c10d::Reducer> reducer) {
-reducer_ = reducer;
+Logger::Logger(std::shared_ptr<c10d::Reducer> reducer)
+: reducer_(std::move(reducer)) {
 ddp_logging_data_ = std::make_unique<at::DDPLoggingData>();
 }

View File

@@ -31,7 +31,7 @@ void SequenceNum::increment() {
 // Implemented without above get() and increment() so we don't repeatedly lock
 // and unblock.
 uint64_t SequenceNum::getAndIncrement() {
-uint64_t curVal;
+uint64_t curVal = 0;
 std::lock_guard<std::mutex> lock(lock_);
 TORCH_CHECK(num_ != c10::nullopt);
 curVal = *num_;

View File

@@ -46,6 +46,7 @@ class LibKinetoClient : public libkineto::ClientInterface {
 (void)disableProfiler();
 }
+// NOLINTNEXTLINE(modernize-use-override)
 void set_withstack(bool withStack) override {
 withStack_ = withStack;
 }