Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[Reland] increase clang-tidy coverage in torch/csrc (#108875)
Reland of PR #103058; there was a time gap between that PR and other PRs modifying torch/csrc. Pull Request resolved: https://github.com/pytorch/pytorch/pull/108875. Approved by: https://github.com/Skylion007
.clang-tidy
@@ -44,7 +44,7 @@ modernize-*,
 performance-*,
 readability-container-size-empty,
 '
-HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
+HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/).*$'
 AnalyzeTemporaryDtors: false
 WarningsAsErrors: '*'
 ...
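The only change here is to HeaderFilterRegex: dropping the (?!deploy/interpreter/cpython) negative lookahead means clang-tidy now reports diagnostics for headers anywhere under torch/csrc/, not just outside the vendored CPython interpreter tree.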
.github/workflows/lint.yml
@@ -17,6 +17,7 @@ jobs:
   lintrunner:
     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
     with:
+      timeout: 120
       runner: linux.2xlarge
       docker-image: pytorch-linux-focal-linter
       fetch-depth: 0
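The added timeout: 120 raises the lintrunner job's time limit, presumably because clang-tidy now has to process far more of torch/csrc than before.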
.lintrunner.toml
@@ -277,33 +277,33 @@ command = [
 code = 'CLANGTIDY'
 include_patterns = [
     'c10/**/*.cpp',
-    'torch/csrc/fx/**/*.cpp',
-    'torch/csrc/generic/**/*.cpp',
-    'torch/csrc/onnx/**/*.cpp',
-    'torch/csrc/tensor/**/*.cpp',
-    'torch/csrc/utils/**/*.cpp',
+    'torch/csrc/**/*.cpp',
 ]
 exclude_patterns = [
     # The negative filters below are to exclude files that include onnx_pb.h or
     # caffe2_pb.h, otherwise we'd have to build protos as part of this CI job.
-    # FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed
-    # in a follow up PR.
+    # that are not easily converted to accepted c++
+    'c10/cuda/**/*.cpp',
+    'c10/test/**/*.cpp',
+    # CUDA files are also excluded.
     '**/fb/**',
     'torch/csrc/jit/passes/onnx/helper.cpp',
     'torch/csrc/jit/passes/onnx/shape_type_inference.cpp',
-    'torch/csrc/jit/serialization/onnx.cpp',
-    'torch/csrc/jit/serialization/export.cpp',
-    'torch/csrc/jit/serialization/import.cpp',
-    '**/*pb.h',
-    '**/*CUDA*',
-    '**/cuda/*pp',
-    'c10/test/**',
-    'third_party/**/*',
-    'torch/csrc/api/**',
-    'torch/csrc/autograd/**',
-    'torch/csrc/CudaIPCTypes.cpp',
-    'torch/csrc/cuda/**',
-    'torch/csrc/dynamo/*',
+    'torch/csrc/distributed/**/*',
+    'torch/csrc/inductor/**/*',
+    'torch/csrc/jit/**/*',
+    'torch/csrc/jit/serialization/import_legacy.cpp',
+    'torch/csrc/jit/serialization/export.cpp',
+    'torch/csrc/lazy/**/*',
+    'torch/csrc/onnx/init.cpp',
+    'torch/csrc/cuda/nccl.*',
+    'torch/csrc/cuda/python_nccl.cpp',
+    'torch/csrc/autograd/FunctionsManual.cpp',
+    'torch/csrc/jit/codegen/cuda/runtime/*',
+    'torch/csrc/utils/disable_torch_function.cpp',
+    'torch/csrc/profiler/**/*',
+    'torch/csrc/quantized/**/*',
+    'torch/csrc/mps/**/*',
 ]
 init_command = [
     'python3',
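Net effect: instead of listing a handful of torch/csrc subdirectories to lint, the config now includes every .cpp under torch/csrc and opts out the subtrees and files that are not yet clang-tidy clean. This inversion, together with the widened HeaderFilterRegex above, is what "increase clang-tidy coverage" means in practice.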
c10/core/SafePyObject.h
@@ -22,7 +22,7 @@ struct C10_API SafePyObject {
   // Steals a reference to data
   SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
       : data_(data), pyinterpreter_(pyinterpreter) {}
-  SafePyObject(SafePyObject&& other)
+  SafePyObject(SafePyObject&& other) noexcept
       : data_(std::exchange(other.data_, nullptr)),
         pyinterpreter_(other.pyinterpreter_) {}
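This noexcept is what clang-tidy's performance-noexcept-move-constructor check asks for. A minimal standalone sketch (Holder is a made-up type, not from the PR) of why it matters when such objects live in containers:

#include <cstdio>
#include <utility>
#include <vector>

struct Holder {
  int* data_ = nullptr;
  Holder() = default;
  Holder(const Holder&) { std::puts("copied"); }
  // Without noexcept here, std::vector falls back to copying elements on
  // reallocation (via std::move_if_noexcept) to keep its strong exception
  // guarantee.
  Holder(Holder&& other) noexcept
      : data_(std::exchange(other.data_, nullptr)) {
    std::puts("moved");
  }
};

int main() {
  std::vector<Holder> v;
  v.reserve(1);
  v.emplace_back();
  v.emplace_back(); // forces reallocation: prints "moved" thanks to noexcept
}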
c10/core/TensorImpl.h
@@ -75,7 +75,7 @@ inline int64_t size_from_dim_(int k, IntArrayRef dims) {
 
 // Product of all dims up to k (not including dims[k])
 inline int64_t size_to_dim_(int k, IntArrayRef dims) {
-  TORCH_CHECK((unsigned)k <= dims.size());
+  TORCH_CHECK(k >= 0 && static_cast<size_t>(k) <= dims.size());
   int64_t r = 1;
   for (const auto i : c10::irange(k)) {
     r *= dims[i];
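The rewritten check states the bounds test explicitly instead of relying on unsigned wraparound, and drops the C-style cast clang-tidy dislikes. A small illustration (hypothetical values, not PyTorch code) of what the old form was doing:

#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  int k = -1;
  std::size_t size = 4;
  // A negative k wraps around to a huge unsigned value, so the old test
  // rejects it only by accident of wraparound, hiding the real intent.
  std::printf("(unsigned)k = %u\n", (unsigned)k); // 4294967295 for 32-bit unsigned
  bool old_check = (unsigned)k <= size;
  // The new form spells out both conditions.
  bool new_check = k >= 0 && static_cast<std::size_t>(k) <= size;
  assert(old_check == new_check); // same verdict, but now readable
}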
c10/util/OptionalArrayRef.h
@@ -27,7 +27,7 @@ class OptionalArrayRef final {
 
   OptionalArrayRef(const OptionalArrayRef& other) = default;
 
-  OptionalArrayRef(OptionalArrayRef&& other) = default;
+  OptionalArrayRef(OptionalArrayRef&& other) noexcept = default;
 
   constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept
       : wrapped_opt_array_ref(other) {}
@@ -90,7 +90,7 @@ class OptionalArrayRef final {
 
   OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;
 
-  OptionalArrayRef& operator=(OptionalArrayRef&& other) = default;
+  OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;
 
   constexpr OptionalArrayRef& operator=(
       const optional<ArrayRef<T>>& other) noexcept {
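Both OptionalArrayRef hunks add noexcept to defaulted move operations. A defaulted move is only implicitly noexcept when every member's move is, so spelling it out turns that assumption into a compile-time contract. A minimal sketch (Opt and Wrapped are hypothetical stand-ins):

#include <type_traits>

struct Wrapped {
  int v = 0;
};

struct Opt {
  Wrapped w;
  Opt() = default;
  Opt(const Opt&) = default;
  Opt& operator=(const Opt&) = default;
  // If Wrapped's move could throw, these declarations would not compile
  // cleanly; as written they guarantee nothrow moves to every user.
  Opt(Opt&&) noexcept = default;
  Opt& operator=(Opt&&) noexcept = default;
};

static_assert(std::is_nothrow_move_constructible_v<Opt>);
static_assert(std::is_nothrow_move_assignable_v<Opt>);

int main() {}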
c10/util/either.h
@@ -78,7 +78,7 @@ class either final {
     return *this;
   }
 
-  either<Left, Right>& operator=(either<Left, Right>&& rhs) {
+  either<Left, Right>& operator=(either<Left, Right>&& rhs) noexcept {
     _destruct();
     _side = rhs._side;
     if (_side == Side::left) {
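Same motivation as the hunks above: clang-tidy's performance-noexcept-move-constructor check covers move assignment operators as well as move constructors, so this user-provided operator gets the noexcept promise too.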
torch/csrc/CudaIPCTypes.cpp
@@ -139,8 +139,8 @@ void ReturnRefCounter(const std::string& handle, uint64_t offset /* unused */) {
 
 CudaIPCSentData::CudaIPCSentData(
     std::string handle,
-    int64_t offset,
-    int64_t* counter_ptr,
+    uint64_t offset,
+    uint64_t* counter_ptr,
     at::Device device)
     : handle_(std::move(handle)),
       offset_(offset),
@@ -206,7 +206,7 @@ CudaIPCSentData::~CudaIPCSentData() {
 #endif
 }
 
-int64_t CudaIPCSentData::counter_value() {
+uint64_t CudaIPCSentData::counter_value() {
   return *counter_ptr_;
 }
torch/csrc/CudaIPCTypes.h
@@ -22,8 +22,8 @@ struct CudaIPCReceivedData final {
 
 struct CudaIPCSentData final {
   std::string handle_;
-  int64_t offset_;
-  int64_t* counter_ptr_; // Reference counter shared memory block
+  uint64_t offset_;
+  uint64_t* counter_ptr_; // Reference counter shared memory block
   at::DataPtr original_ptr_; // Original mem allocation
   cudaEvent_t event_; // Sync cuEventDestroy
   bool event_sync_required_;
@@ -31,16 +31,16 @@ struct CudaIPCSentData final {
 
   CudaIPCSentData(
       std::string handle,
-      int64_t offset,
-      int64_t* counter_ptr,
+      uint64_t offset,
+      uint64_t* counter_ptr,
       at::Device device);
   ~CudaIPCSentData();
 
-  int64_t counter_value();
+  uint64_t counter_value();
   std::string handle() {
     return handle_;
   }
-  int64_t offset() {
+  uint64_t offset() {
     return offset_;
   }
   void set_original_ptr(at::DataPtr data_ptr) {
@@ -87,8 +87,8 @@ struct CudaIPCRefCountersFile final {
         handle_(std::move(handle)),
         refcounted_shared_mem_(std::move(data_ptr)) {}
 
-  int64_t* counter_ptr() {
-    return static_cast<int64_t*>(refcounted_shared_mem_.get()) + next_offset_;
+  uint64_t* counter_ptr() {
+    return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_;
   }
 
   void set_counter(uint64_t value) {
@@ -103,7 +103,7 @@ struct CudaIPCRefCountersFile final {
     return used_slots_;
   }
 
-  int64_t get_offset() {
+  uint64_t get_offset() {
     return next_offset_;
  }
 
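The CudaIPCTypes hunks are one coordinated type fix: these offsets and reference counts index slots in a shared-memory block and are never negative, so uint64_t matches their domain and removes the signed/unsigned mixing a stricter clang-tidy run would flag. A toy sketch of the pattern (RefCounters is a hypothetical miniature, not the real class):

#include <cstdint>
#include <cstdio>

struct RefCounters {
  // Base of a block of uint64_t counter slots (shared memory in the real code).
  uint64_t* base;
  // Slot index: always non-negative, so an unsigned type fits the domain
  // and pointer arithmetic stays free of sign-conversion warnings.
  uint64_t next_offset = 0;

  uint64_t* counter_ptr() { return base + next_offset; }
  void set_counter(uint64_t value) { *counter_ptr() = value; }
};

int main() {
  uint64_t storage[8] = {};
  RefCounters rc{storage};
  rc.set_counter(1);
  std::printf("slot 0 = %llu\n",
              static_cast<unsigned long long>(storage[0]));
}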
torch/csrc/Storage.cpp
@@ -134,8 +134,8 @@ static PyObject* THPStorage_pynew(
   torch::ParsedArgs<3> parsed_args;
   auto r = parser.parse(args, kwargs, parsed_args);
 
-  int64_t allocator_arg_idx = 0;
-  int64_t device_arg_idx = 1;
+  int allocator_arg_idx = 0;
+  int device_arg_idx = 1;
 
   if (r.idx > 0) {
     allocator_arg_idx = 1;
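Here the fix goes the other way: these values only ever index a three-argument signature, so plain int is the natural width and avoids mixed-width arithmetic with r.idx, which is itself an int.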
torch/csrc/jit/passes/onnx/helper.cpp
@@ -186,7 +186,7 @@ Node* createONNXConstant(
     at::Tensor value) {
   Node* constant_node = graph->create(onnx::Constant, 1);
   constant_node->insertBefore(n_to_insert_before);
-  constant_node->t_(attr::value, value);
+  constant_node->t_(attr::value, std::move(value));
   return constant_node;
 }
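value is a by-value sink parameter, so its last use should hand the object off with std::move rather than copy it again; for at::Tensor the saved copy is a refcount bump. A minimal sketch of the sink-parameter pattern (Blob and Node here are hypothetical stand-ins):

#include <utility>
#include <vector>

struct Blob {
  std::vector<int> payload;
};

struct Node {
  Blob attr;
  void set_attr(Blob b) {   // by-value sink parameter
    attr = std::move(b);    // final use: move, don't copy
  }
};

int main() {
  Node n;
  Blob b{{1, 2, 3}};
  n.set_attr(std::move(b)); // the caller can move in as well
}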