[1/N] Fix clang-tidy warnings in torch/csrc/autograd (#133180)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/133180
Approved by: https://github.com/albanD
Author: cyy
Date: 2024-08-13 03:36:10 +00:00
Committed by: PyTorch MergeBot
Parent: 4671e98656
Commit: af7830e353
10 changed files with 22 additions and 47 deletions
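The commit message doesn't name the offending checks; judging from the fixes below, they plausibly include modernize-concat-nested-namespaces (the namespace rewrites) and misc-include-cleaner (the dropped includes). Both are standard clang-tidy checks, but whether PyTorch's .clang-tidy enables exactly these is an assumption here. A minimal fragment that would surface such warnings:

# Hypothetical .clang-tidy fragment; the real PyTorch config enables many more checks.
Checks: >
  modernize-concat-nested-namespaces,
  misc-include-cleaner
WarningsAsErrors: ''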

torch/csrc/autograd/functions/accumulate_grad.cpp

@@ -12,8 +12,7 @@
 #include <stdexcept>
 #include <utility>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 // AccumulateGrad sets sequence_nr to the max value so it's always called
 // ASAP during backwards.
@@ -104,5 +103,4 @@ variable_list AccumulateGrad::apply_with_saved(
   return variable_list();
 }
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
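
The same mechanical change repeats in every file below: a pre-C++17 pair of nested namespace blocks is collapsed into a single C++17 nested namespace definition. A minimal standalone illustration (the function names are placeholders):

// Pre-C++17: every level opens its own block and needs its own closer.
namespace torch {
namespace autograd {
void run();
} // namespace autograd
} // namespace torch

// C++17 nested namespace definition: same namespace, one block, one closer.
namespace torch::autograd {
void run_again();
} // namespace torch::autograd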

torch/csrc/autograd/functions/accumulate_grad.h

@@ -16,8 +16,7 @@
 #include <mutex>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 #define CHECK_RESULT(RESULT, VAR) \
   if (!(RESULT.is_sparse() || VAR.is_sparse() || RESULT.is_sparse_csr() || \
@@ -273,5 +272,4 @@ struct TORCH_API AccumulateGrad : public Node {
 #undef CHECK_RESULT
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd

torch/csrc/autograd/functions/basic_ops.cpp

@@ -10,8 +10,7 @@
 #include <memory>
 #include <utility>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 auto Error::apply(variable_list&& inputs) -> variable_list {
   throw std::runtime_error(msg);
@@ -79,5 +78,4 @@ variable_list GraphRoot::apply_with_saved(
   return result;
 }
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd

torch/csrc/autograd/functions/basic_ops.h

@@ -9,8 +9,7 @@
 #include <string>
 #include <vector>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 struct TORCH_API Error : public Node {
   Error(std::string msg, edge_list&& next_edges)
@@ -45,9 +44,7 @@ struct TORCH_API NotImplemented : public Error {
 // @once_differentiable
 struct TORCH_API DelayedError : public Node {
   DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
-    // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
-    for (const auto i : c10::irange(num_inputs)) {
-      (void)i; // Suppress unused variable warning
+    for (const auto _ [[maybe_unused]] : c10::irange(num_inputs)) {
       add_input_metadata(Node::undefined_input());
     }
   }
@@ -107,5 +104,4 @@ struct TORCH_API Identity : public Node {
   variable_list apply(variable_list&& inputs) override;
 };
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
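
The DelayedError hunk above swaps the (void)i cast and its NOLINT comment for the C++17 [[maybe_unused]] attribute on the loop binding, which silences the unused-variable warning declaratively instead of with a dead store. A self-contained sketch of the pattern (toy loop, not the PyTorch code):

#include <cstdio>

int main() {
  int total = 0;
  // Pre-attribute pattern: name the loop variable, then cast it to void.
  for (const auto i : {1, 2, 3}) {
    (void)i; // suppress unused-variable warning
    ++total;
  }
  // C++17 pattern used in the hunk: annotate the binding instead.
  for (const auto i [[maybe_unused]] : {1, 2, 3}) {
    ++total;
  }
  std::printf("%d iterations\n", total);
  return 0;
}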

torch/csrc/autograd/functions/comm.cpp

@@ -9,12 +9,10 @@
 #include <ATen/ATen.h>
 #include <ATen/cuda/CUDAContext.h>
 #include <cstddef>
 #include <memory>
 #include <vector>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 Scatter::Scatter(
     std::vector<at::Device> devices,
     std::optional<std::vector<int64_t>> chunk_sizes,
@@ -137,5 +135,4 @@ variable_list Gather::apply(variable_list&& inputs) {
   return {variable};
 }
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd

torch/csrc/autograd/functions/comm.h

@@ -11,8 +11,7 @@
 #include <cstddef>
 #include <vector>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 struct TORCH_CUDA_CU_API Scatter : public Node {
   explicit Scatter(
@@ -43,5 +42,4 @@ struct TORCH_CUDA_CU_API Gather : public Node {
   int64_t dim_;
 };
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd

torch/csrc/autograd/functions/tensor.cpp

@@ -10,13 +10,11 @@
 #include <ATen/ATen.h>
 #include <c10/util/irange.h>
 #include <cstddef>
 #include <memory>
 #include <stdexcept>
 #include <utility>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 auto CopyBackwards::apply(variable_list&& grads) -> variable_list {
   check_input_variables("CopyBackwards", grads, 1, -1, true);
@@ -213,5 +211,4 @@ auto CopySlices::apply(variable_list&& inputs1) -> variable_list {
   });
 }
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd

torch/csrc/autograd/functions/tensor.h

@@ -11,8 +11,7 @@
 #include <cstdint>
 #include <memory>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 struct TORCH_API CopyBackwards : public Node {
   variable_list apply(variable_list&& grads) override;
@@ -182,5 +181,4 @@ struct TORCH_API CopySlices : public Node {
   std::shared_ptr<Node> fn;
 };
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd

torch/csrc/autograd/functions/utils.cpp

@@ -6,10 +6,8 @@
 #include <torch/csrc/autograd/variable.h>
 #include <sstream>
 #include <vector>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 variable_list wrap_outputs(
     const variable_list& inputs,
@@ -66,5 +64,4 @@ void check_input_variables(
     }
   }
 }
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd

torch/csrc/autograd/functions/utils.h

@@ -13,8 +13,7 @@
 #include <memory>
 #include <vector>
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
 using function_constructor = std::function<std::shared_ptr<Node>(edge_list&&)>;
@@ -105,10 +104,9 @@ inline bool isFwGradDefinedTensorList(
   bool ret = false;
   for (auto i : c10::irange(li.size())) {
     auto t = li.get(i);
-    ret |= (t.has_value() && isFwGradDefined(t.value()));
+    ret |= isFwGradDefined(t);
   }
   return ret;
 }
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
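
The last hunk drops the explicit has_value()/value() dance, which is safe provided isFwGradDefined takes the optional itself and already treats an empty optional as "no forward grad" (that appears to be the case here, since the new code passes t straight through, but it is an inference from this diff rather than something the commit states). A minimal analogue with toy names showing why the outer guard is redundant:

#include <cstdio>
#include <optional>
#include <vector>

// Toy stand-in for isFwGradDefined: it checks has_value() internally,
// so callers need no separate guard before the call.
bool isPositive(const std::optional<int>& v) {
  return v.has_value() && *v > 0;
}

int main() {
  std::vector<std::optional<int>> values = {std::nullopt, -2, 5};
  bool ret = false;
  for (const auto& v : values) {
    // Redundant form: ret |= (v.has_value() && isPositive(v.value()));
    ret |= isPositive(v); // the callee handles the empty case itself
  }
  std::printf("any positive: %s\n", ret ? "yes" : "no");
  return 0;
}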