Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
[8/N] Fix clang-tidy warnings in jit (#131997)
Follows #131996

Pull Request resolved: https://github.com/pytorch/pytorch/pull/131997
Approved by: https://github.com/Skylion007
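Most of the changes below follow two patterns flagged by clang-tidy: the nested `namespace torch { namespace jit {` pairs are collapsed into the C++17 form `namespace torch::jit {`, and `c10::intrusive_ptr` parameters that were taken by value are now taken by const reference, which skips an atomic reference-count bump on every call. A minimal sketch of both patterns, assuming the c10 headers are on the include path; the `Work` type and `process()`/`process_all()` functions are illustrative and not part of the PR:

#include <c10/util/intrusive_ptr.h>

// C++17 nested-namespace definition; replaces the old
// `namespace torch { namespace jit { ... } }` pair.
namespace torch::jit {

// Illustrative refcounted type (not from the PR).
struct Work : c10::intrusive_ptr_target {
  int payload = 0;
};

// Taking the intrusive_ptr by value copies it and atomically increments the
// refcount on every call; a const reference avoids that when the callee does
// not need to take ownership.
inline int process(const c10::intrusive_ptr<Work>& work) {
  return work->payload;
}

inline int process_all() {
  auto work = c10::make_intrusive<Work>(); // one refcount bump at creation
  return process(work);                    // no extra bump: passed by const ref
}

} // namespace torch::jit

A callee that genuinely needs to retain the pointer can still copy it from the const reference, paying for the refcount increment only where ownership is actually taken.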
@@ -3,8 +3,7 @@
 #include <c10/cuda/CUDAStream.h>
 #include <torch/custom_class.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 class CUDAEvent;
 // This class is a wrapper around c10::cuda::CUDAStream.
@@ -39,9 +38,9 @@ class CUDAStream final : public CustomClassHolder {
     stream_->synchronize();
   }
 
-  void waitEvent(c10::intrusive_ptr<CUDAEvent> event);
+  void waitEvent(const c10::intrusive_ptr<CUDAEvent>& event);
 
-  void waitStream(c10::intrusive_ptr<CUDAStream> stream);
+  void waitStream(const c10::intrusive_ptr<CUDAStream>& stream);
 
   /// Get the CUDA device index that this stream is associated with.
   int64_t device_index() const {
@@ -91,7 +90,7 @@ class CUDAEvent final : public CustomClassHolder {
     event_ = std::make_unique<at::cuda::CUDAEvent>(flags);
   }
 
-  double elapsedTime(c10::intrusive_ptr<CUDAEvent> end) {
+  double elapsedTime(const c10::intrusive_ptr<CUDAEvent>& end) {
     return event_->elapsed_time(*end->event_);
   }
 
@@ -107,12 +106,12 @@ class CUDAEvent final : public CustomClassHolder {
     return event_->query();
   }
 
-  void record(c10::intrusive_ptr<CUDAStream> stream);
+  void record(const c10::intrusive_ptr<CUDAStream>& stream);
 
   void synchronize() {
     event_->synchronize();
  }
-  void wait(c10::intrusive_ptr<CUDAStream> stream);
+  void wait(const c10::intrusive_ptr<CUDAStream>& stream);
 
  private:
   void recordInternal(CUDAStream* stream);
@@ -121,7 +120,7 @@ class CUDAEvent final : public CustomClassHolder {
   friend class CUDAStream;
 };
 
-c10::intrusive_ptr<CUDAEvent> CUDAStream::recordEvent(
+inline c10::intrusive_ptr<CUDAEvent> CUDAStream::recordEvent(
     c10::intrusive_ptr<CUDAEvent> event) {
   if (!event) {
     event = c10::make_intrusive<CUDAEvent>();
@@ -131,25 +130,26 @@ c10::intrusive_ptr<CUDAEvent> CUDAStream::recordEvent(
   return event;
 }
 
-void CUDAStream::waitEvent(c10::intrusive_ptr<CUDAEvent> event) {
+inline void CUDAStream::waitEvent(const c10::intrusive_ptr<CUDAEvent>& event) {
   event->event_->block(*stream_);
 }
 
-void CUDAStream::waitStream(c10::intrusive_ptr<CUDAStream> stream) {
+inline void CUDAStream::waitStream(
+    const c10::intrusive_ptr<CUDAStream>& stream) {
   auto ev = c10::make_intrusive<CUDAEvent>();
   stream->recordEvent(ev);
   waitEvent(ev);
 }
 
-void CUDAEvent::record(c10::intrusive_ptr<CUDAStream> stream) {
+inline void CUDAEvent::record(const c10::intrusive_ptr<CUDAStream>& stream) {
   event_->record(*stream->stream_);
 }
 
-void CUDAEvent::recordInternal(CUDAStream* stream) {
+inline void CUDAEvent::recordInternal(CUDAStream* stream) {
   event_->record(*stream->stream_);
 }
 
-void CUDAEvent::wait(c10::intrusive_ptr<CUDAStream> stream) {
+inline void CUDAEvent::wait(const c10::intrusive_ptr<CUDAStream>& stream) {
   event_->block(*stream->stream_);
 }
 
@@ -181,5 +181,4 @@ TORCH_LIBRARY(cuda, m) {
       .def("wait", &CUDAEvent::wait);
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
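The stream and event member functions above are defined out of class in a header, so they also gain `inline`; without it, each translation unit that includes the header would emit its own definition and break the one-definition rule at link time. A generic sketch of that rule with an illustrative `Counter` class, not taken from the PR:

// counter.h -- header-only class with an out-of-class member definition.
#pragma once

struct Counter {
  int bump(); // declared in the class ...
  int value = 0;
};

// ... and defined later in the same header. `inline` lets every translation
// unit that includes counter.h see this definition without an ODR violation.
inline int Counter::bump() {
  return ++value;
}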
@@ -15,8 +15,7 @@
 #include <torch/csrc/jit/jit_log.h>
 #include <torch/csrc/jit/serialization/python_print.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 class JitLoggingConfig {
  public:
@@ -33,10 +32,10 @@ class JitLoggingConfig {
   std::ostream* out;
 
   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
-  JitLoggingConfig() {
+  JitLoggingConfig() : out(&std::cerr) {
     const char* jit_log_level = std::getenv("PYTORCH_JIT_LOG_LEVEL");
     logging_levels.assign(jit_log_level == nullptr ? "" : jit_log_level);
-    out = &std::cerr;
+
     parse();
   }
   void parse();
@@ -146,7 +145,7 @@ std::string jit_log_prefix(
   std::stringstream out_ss;
   std::string line;
   while (std::getline(in_ss, line)) {
-    out_ss << prefix << line << std::endl;
+    out_ss << prefix << line << '\n';
   }
 
   return out_ss.str();
@@ -185,5 +184,4 @@ std::ostream& operator<<(std::ostream& out, JitLoggingLevels level) {
   return out;
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
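Two smaller cleanups in the logging code: the output stream pointer is initialized in the constructor's member initializer list instead of being assigned in the body, and the per-line `std::endl` becomes `'\n'`, avoiding a stream flush on every line. A standalone sketch of both, using an illustrative `Logger` class rather than the PR's `JitLoggingConfig`:

#include <iostream>
#include <sstream>
#include <string>

class Logger {
  std::ostream* out_;

 public:
  // Member initializer list: out_ is never left uninitialized, which is the
  // condition cppcoreguidelines-pro-type-member-init warns about.
  Logger() : out_(&std::cerr) {}

  void prefix_lines(const std::string& prefix, const std::string& text) {
    std::istringstream in(text);
    std::string line;
    while (std::getline(in, line)) {
      // '\n' ends the line without forcing a flush; std::endl would flush
      // the stream once per line.
      *out_ << prefix << line << '\n';
    }
  }
};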
@@ -38,8 +38,7 @@
 // `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there
 // is no logging level that is higher than `GRAPH_DEBUG`.
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 struct Node;
 struct Graph;
@@ -124,5 +123,4 @@ TORCH_API std::ostream& operator<<(
   (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE))
 #define GRAPH_DEBUG_ENABLED \
   (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG))
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -11,8 +11,7 @@
 #include <torch/csrc/jit/api/function_impl.h>
 #include <torch/csrc/jit/jit_opt_limit.h>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 static std::unordered_map<std::string, int64_t>& passes_to_current_counter() {
   static std::unordered_map<std::string, int64_t> passes_to_current_counter;
@@ -21,8 +20,7 @@ static std::unordered_map<std::string, int64_t>& passes_to_current_counter() {
 
 static int parseOptLimit(const std::string& opt_limit) {
   try {
-    int64_t n = std::stoi(opt_limit);
-    return n;
+    return std::stoi(opt_limit);
   } catch (...) {
     return -1;
   }
@@ -82,5 +80,4 @@ bool opt_limit(const char* pass_name) {
   return true;
 }
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -28,12 +28,10 @@
 //   return;
 // }
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 TORCH_API bool opt_limit(const char* pass_name);
 
 #define JIT_OPT_ALLOWED opt_limit(__FILE__)
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
@@ -1,8 +1,7 @@
 #pragma once
 #include <functional>
 
-namespace torch {
-namespace jit {
+namespace torch::jit {
 
 class ResourceGuard {
   std::function<void()> _destructor;
@@ -23,5 +22,4 @@ class ResourceGuard {
   }
 };
 
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
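`ResourceGuard` itself only gets the namespace cleanup; it is an RAII helper that stores a `std::function<void()>` and runs it when the guard leaves scope. A generic sketch of that pattern with an illustrative `ScopeGuard` class and API, not the class's actual interface:

#include <cstdio>
#include <functional>
#include <utility>

// Illustrative RAII guard: runs the stored callable on scope exit.
class ScopeGuard {
  std::function<void()> cleanup_;

 public:
  explicit ScopeGuard(std::function<void()> cleanup)
      : cleanup_(std::move(cleanup)) {}
  ~ScopeGuard() {
    if (cleanup_) {
      cleanup_();
    }
  }
  ScopeGuard(const ScopeGuard&) = delete;
  ScopeGuard& operator=(const ScopeGuard&) = delete;
};

int main() {
  std::FILE* f = std::fopen("scratch.txt", "w");
  ScopeGuard close_file([f] {
    if (f) {
      std::fclose(f);
    }
  });
  // ... use f; the file is closed on every return path.
  return 0;
}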