[Reland] increase clang-tidy coverage in torch/csrc (#108875)
Reland of PR #103058; there was a time gap between that PR and other PRs modifying torch/csrc.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/108875
Approved by: https://github.com/Skylion007
@@ -44,7 +44,7 @@ modernize-*,
 performance-*,
 readability-container-size-empty,
 '
-HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
+HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/).*$'
 AnalyzeTemporaryDtors: false
 WarningsAsErrors: '*'
 ...
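The check list above keeps readability-container-size-empty enabled alongside the modernize-* and performance-* groups. As a rough illustration of what that particular check flags, on made-up code rather than anything in this diff:

#include <string>
#include <vector>

// Hypothetical helper: readability-container-size-empty suggests empty() here.
bool has_names(const std::vector<std::string>& names) {
  return names.size() > 0;  // clang-tidy: prefer '!names.empty()'
}

bool no_names(const std::vector<std::string>& names) {
  return names.empty();  // the form the check rewrites toward
}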
.github/workflows/lint.yml
@@ -17,6 +17,7 @@ jobs:
   lintrunner:
     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
     with:
+      timeout: 120
       runner: linux.2xlarge
       docker-image: pytorch-linux-focal-linter
       fetch-depth: 0
@@ -277,33 +277,33 @@ command = [
 code = 'CLANGTIDY'
 include_patterns = [
     'c10/**/*.cpp',
-    'torch/csrc/fx/**/*.cpp',
-    'torch/csrc/generic/**/*.cpp',
-    'torch/csrc/onnx/**/*.cpp',
-    'torch/csrc/tensor/**/*.cpp',
-    'torch/csrc/utils/**/*.cpp',
+    'torch/csrc/**/*.cpp',
 ]
 exclude_patterns = [
     # The negative filters below are to exclude files that include onnx_pb.h or
     # caffe2_pb.h, otherwise we'd have to build protos as part of this CI job.
-    # FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed
-    # in a follow up PR.
-    # that are not easily converted to accepted c++
-    'c10/cuda/**/*.cpp',
-    'c10/test/**/*.cpp',
+    # CUDA files are also excluded.
     '**/fb/**',
-    'torch/csrc/jit/passes/onnx/helper.cpp',
-    'torch/csrc/jit/passes/onnx/shape_type_inference.cpp',
-    'torch/csrc/jit/serialization/onnx.cpp',
-    'torch/csrc/jit/serialization/export.cpp',
-    'torch/csrc/jit/serialization/import.cpp',
+    '**/*pb.h',
+    '**/*CUDA*',
+    '**/cuda/*pp',
+    'c10/test/**',
+    'third_party/**/*',
+    'torch/csrc/api/**',
+    'torch/csrc/autograd/**',
+    'torch/csrc/CudaIPCTypes.cpp',
+    'torch/csrc/cuda/**',
+    'torch/csrc/dynamo/*',
+    'torch/csrc/distributed/**/*',
+    'torch/csrc/inductor/**/*',
+    'torch/csrc/jit/**/*',
     'torch/csrc/jit/serialization/import_legacy.cpp',
+    'torch/csrc/jit/serialization/export.cpp',
+    'torch/csrc/lazy/**/*',
     'torch/csrc/onnx/init.cpp',
-    'torch/csrc/cuda/nccl.*',
-    'torch/csrc/cuda/python_nccl.cpp',
-    'torch/csrc/autograd/FunctionsManual.cpp',
-    'torch/csrc/jit/codegen/cuda/runtime/*',
-    'torch/csrc/utils/disable_torch_function.cpp',
+    'torch/csrc/profiler/**/*',
+    'torch/csrc/quantized/**/*',
+    'torch/csrc/mps/**/*',
 ]
 init_command = [
     'python3',
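The include list collapses the previous per-directory entries into a single 'torch/csrc/**/*.cpp' glob, and directories that are not yet warning-clean (jit, distributed, lazy, profiler, and so on) are carved out via exclude_patterns instead of being opted in one by one. For individual lines that cannot be fixed right away, clang-tidy's suppression comments are the usual escape hatch; a generic sketch, assuming a check from the modernize-* group shown in the first hunk, not code from this PR:

// Hypothetical translation unit now matched by 'torch/csrc/**/*.cpp'.
void touch_legacy_pointer() {
  // NOLINTNEXTLINE(modernize-use-nullptr) -- left as-is to illustrate suppression
  int* legacy = 0;
  (void)legacy;
}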
@@ -22,7 +22,7 @@ struct C10_API SafePyObject {
   // Steals a reference to data
   SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
       : data_(data), pyinterpreter_(pyinterpreter) {}
-  SafePyObject(SafePyObject&& other)
+  SafePyObject(SafePyObject&& other) noexcept
       : data_(std::exchange(other.data_, nullptr)),
         pyinterpreter_(other.pyinterpreter_) {}
 
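Marking the move constructor noexcept does more than satisfy the performance-noexcept-move-constructor check: standard containers only move their elements during reallocation when the move constructor cannot throw, otherwise they fall back to copying. A minimal, self-contained sketch of that behaviour (an assumed example, unrelated to SafePyObject itself):

#include <iostream>
#include <type_traits>
#include <vector>

struct Noisy {
  Noisy() = default;
  Noisy(const Noisy&) { std::cout << "copy\n"; }
  Noisy(Noisy&&) noexcept { std::cout << "move\n"; }  // drop noexcept and vector copies instead
};

int main() {
  static_assert(std::is_nothrow_move_constructible_v<Noisy>);
  std::vector<Noisy> v(2);
  v.reserve(v.capacity() + 1);  // forces reallocation; elements are moved, not copied
}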
@@ -75,7 +75,7 @@ inline int64_t size_from_dim_(int k, IntArrayRef dims) {
 
 // Product of all dims up to k (not including dims[k])
 inline int64_t size_to_dim_(int k, IntArrayRef dims) {
-  TORCH_CHECK((unsigned)k <= dims.size());
+  TORCH_CHECK(k >= 0 && static_cast<size_t>(k) <= dims.size());
   int64_t r = 1;
   for (const auto i : c10::irange(k)) {
     r *= dims[i];
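The old bounds check cast k to unsigned before comparing, so a negative k wraps around to a huge value and the check fails for a misleading reason; the C-style sign-changing cast is also the kind of construct the widened clang-tidy run complains about. The new form tests the sign explicitly and then compares within one signedness. A small standalone illustration of the wrap-around, with throwaway values rather than PyTorch code:

#include <cstdio>

int main() {
  int k = -1;
  unsigned size = 10;
  // Old style: (unsigned)k turns -1 into 4294967295, so the "bounds check"
  // rejects it for a confusing reason rather than reporting a negative index.
  std::printf("(unsigned)k = %u\n", (unsigned)k);
  // New style: check the sign first, then compare in a single signedness.
  bool ok = k >= 0 && static_cast<unsigned>(k) <= size;
  std::printf("ok = %d\n", ok);
}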
@@ -27,7 +27,7 @@ class OptionalArrayRef final {
 
   OptionalArrayRef(const OptionalArrayRef& other) = default;
 
-  OptionalArrayRef(OptionalArrayRef&& other) = default;
+  OptionalArrayRef(OptionalArrayRef&& other) noexcept = default;
 
   constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept
       : wrapped_opt_array_ref(other) {}
@@ -90,7 +90,7 @@ class OptionalArrayRef final {
 
   OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;
 
-  OptionalArrayRef& operator=(OptionalArrayRef&& other) = default;
+  OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;
 
   constexpr OptionalArrayRef& operator=(
       const optional<ArrayRef<T>>& other) noexcept {
@@ -78,7 +78,7 @@ class either final {
     return *this;
   }
 
-  either<Left, Right>& operator=(either<Left, Right>&& rhs) {
+  either<Left, Right>& operator=(either<Left, Right>&& rhs) noexcept {
     _destruct();
     _side = rhs._side;
     if (_side == Side::left) {
@@ -139,8 +139,8 @@ void ReturnRefCounter(const std::string& handle, uint64_t offset /* unused */) {
 
 CudaIPCSentData::CudaIPCSentData(
     std::string handle,
-    int64_t offset,
-    int64_t* counter_ptr,
+    uint64_t offset,
+    uint64_t* counter_ptr,
     at::Device device)
     : handle_(std::move(handle)),
       offset_(offset),
@@ -206,7 +206,7 @@ CudaIPCSentData::~CudaIPCSentData() {
 #endif
 }
 
-int64_t CudaIPCSentData::counter_value() {
+uint64_t CudaIPCSentData::counter_value() {
   return *counter_ptr_;
 }
 
@@ -22,8 +22,8 @@ struct CudaIPCReceivedData final {
 
 struct CudaIPCSentData final {
   std::string handle_;
-  int64_t offset_;
-  int64_t* counter_ptr_; // Reference counter shared memory block
+  uint64_t offset_;
+  uint64_t* counter_ptr_; // Reference counter shared memory block
   at::DataPtr original_ptr_; // Original mem allocation
   cudaEvent_t event_; // Sync cuEventDestroy
   bool event_sync_required_;
@@ -31,16 +31,16 @@ struct CudaIPCSentData final {
 
   CudaIPCSentData(
       std::string handle,
-      int64_t offset,
-      int64_t* counter_ptr,
+      uint64_t offset,
+      uint64_t* counter_ptr,
       at::Device device);
   ~CudaIPCSentData();
 
-  int64_t counter_value();
+  uint64_t counter_value();
   std::string handle() {
     return handle_;
   }
-  int64_t offset() {
+  uint64_t offset() {
     return offset_;
   }
   void set_original_ptr(at::DataPtr data_ptr) {
@@ -87,8 +87,8 @@ struct CudaIPCRefCountersFile final {
         handle_(std::move(handle)),
         refcounted_shared_mem_(std::move(data_ptr)) {}
 
-  int64_t* counter_ptr() {
-    return static_cast<int64_t*>(refcounted_shared_mem_.get()) + next_offset_;
+  uint64_t* counter_ptr() {
+    return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_;
   }
 
   void set_counter(uint64_t value) {
@@ -103,7 +103,7 @@ struct CudaIPCRefCountersFile final {
     return used_slots_;
  }
 
-  int64_t get_offset() {
+  uint64_t get_offset() {
    return next_offset_;
  }
 
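Switching the IPC reference-counter fields and accessors from int64_t to uint64_t keeps them consistent with the uint64_t offsets already used in this file (for example in ReturnRefCounter above), so no sign-changing casts are needed at the call sites. The classic hazard with mixed 64-bit signedness, shown on throwaway values rather than PyTorch code:

#include <cstdint>
#include <iostream>

int main() {
  int64_t signed_counter = -1;   // a reference count should never be negative anyway
  uint64_t unsigned_offset = 1;
  // Usual arithmetic conversions turn -1 into 18446744073709551615 here, so the
  // comparison is (surprisingly) true -- the silent conversion that sign-compare
  // warnings exist to catch.
  if (signed_counter > unsigned_offset) {
    std::cout << "mixed-sign comparison went wrong\n";
  }
}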
@@ -134,8 +134,8 @@ static PyObject* THPStorage_pynew(
   torch::ParsedArgs<3> parsed_args;
   auto r = parser.parse(args, kwargs, parsed_args);
 
-  int64_t allocator_arg_idx = 0;
-  int64_t device_arg_idx = 1;
+  int allocator_arg_idx = 0;
+  int device_arg_idx = 1;
 
   if (r.idx > 0) {
     allocator_arg_idx = 1;
@@ -186,7 +186,7 @@ Node* createONNXConstant(
     at::Tensor value) {
   Node* constant_node = graph->create(onnx::Constant, 1);
   constant_node->insertBefore(n_to_insert_before);
-  constant_node->t_(attr::value, value);
+  constant_node->t_(attr::value, std::move(value));
   return constant_node;
 }
 
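The last hunk moves the at::Tensor parameter that createONNXConstant already receives by value into the attribute call instead of copying it; copying an at::Tensor only bumps a reference count, and the move avoids even that atomic increment. This is the kind of change clang-tidy's performance checks (for example performance-unnecessary-value-param) steer toward when a by-value argument is consumed exactly once. A generic sketch of the same pattern with a hypothetical AttrStore type, not the PyTorch API:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Stand-in for the node attribute sink.
struct AttrStore {
  std::vector<std::string> values;
  void set(std::string v) { values.push_back(std::move(v)); }
};

// Parameter taken by value and consumed once: move it instead of copying.
void add_constant(AttrStore& store, std::string value) {
  store.set(std::move(value));  // without std::move this would copy the string
}

int main() {
  AttrStore store;
  add_constant(store, std::string(1000, 'x'));
  std::cout << store.values.size() << '\n';
}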