[17/N] Fix extra warnings brought by clang-tidy-17 (#143804)

Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/143804
Approved by: https://github.com/Skylion007
Author: cyy
Date: 2024-12-25 19:54:42 +00:00
Committed by: PyTorch MergeBot
Parent: a8ac3a6b20
Commit: f42cff4e29
6 changed files with 29 additions and 26 deletions

View File

@@ -92,6 +92,7 @@ class MatrixRef {
/// The declaration here is extra complicated so that "arrayRef = {}"
/// continues to select the move assignment operator.
template <typename U>
+// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
std::enable_if_t<std::is_same_v<U, T>, MatrixRef<T>>& operator=(
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
U&& Temporary) = delete;
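
The hunk above suppresses clang-tidy's cppcoreguidelines-missing-std-forward check, which flags a forwarding-reference parameter that is never passed to std::forward; a deleted overload has no body to forward from, so a suppression comment is the pragmatic fix. A minimal sketch of the same pattern outside PyTorch (Holder, set, and set_disallowed are illustrative names, not code from this PR):

#include <utility>

template <typename T>
class Holder {
 public:
  // The forwarded case: the argument really is passed to std::forward,
  // so the check is satisfied.
  template <typename U>
  void set(U&& value) {
    value_ = std::forward<U>(value);
  }

  // A deleted overload also takes a forwarding reference, but there is
  // no body in which std::forward could appear, so clang-tidy-17 can
  // still warn; the NOLINTNEXTLINE comment silences it for this line.
  template <typename U>
  // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
  void set_disallowed(U&& value) = delete;

 private:
  T value_{};
};

int main() {
  Holder<int> h;
  h.set(42);
  return 0;
}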

View File

@@ -16,19 +16,18 @@ using namespace dnnl;
using namespace at::native;
using namespace at::native::onednn;
-namespace at::native {
-namespace xpu {
+namespace at::native::xpu {
namespace impl {
struct ConvParams {
std::vector<int64_t> stride;
std::vector<int64_t> padding;
std::vector<int64_t> dilation;
-bool transposed;
+bool transposed{};
std::vector<int64_t> output_padding;
-int groups;
-bool benchmark;
-bool deterministic;
+int64_t groups{};
+bool benchmark{};
+bool deterministic{};
bool is_strided() const;
bool is_dilated() const;
@@ -58,7 +57,7 @@ std::ostream& operator<<(std::ostream& out, const ConvParams& params) {
bool ConvParams::is_strided() const {
bool is_strided = false;
-for (int s : stride) {
+for (auto s : stride) {
is_strided |= (s != 1);
}
return is_strided;
@@ -66,7 +65,7 @@ bool ConvParams::is_strided() const {
bool ConvParams::is_dilated() const {
bool is_dilated = false;
-for (int d : dilation) {
+for (auto d : dilation) {
is_dilated |= (d != 1);
}
return is_dilated;
@@ -74,7 +73,7 @@ bool ConvParams::is_dilated() const {
bool ConvParams::is_padded() const {
bool is_padded = false;
-for (int p : padding) {
+for (auto p : padding) {
is_padded |= (p != 0);
}
return is_padded;
@@ -82,7 +81,7 @@ bool ConvParams::is_padded() const {
bool ConvParams::is_output_padding_neg() const {
bool is_non_neg = false;
-for (int p : output_padding) {
+for (auto p : output_padding) {
is_non_neg |= (p < 0);
}
return is_non_neg;
@@ -99,7 +98,7 @@ bool ConvParams::is_output_padding_big() const {
bool ConvParams::is_padding_neg() const {
bool is_non_neg = false;
-for (int p : padding) {
+for (auto p : padding) {
is_non_neg |= (p < 0);
}
return is_non_neg;
@@ -107,7 +106,7 @@ bool ConvParams::is_padding_neg() const {
bool ConvParams::is_stride_nonpos() const {
bool is_nonpos = false;
-for (int s : stride) {
+for (auto s : stride) {
is_nonpos |= (s <= 0);
}
return is_nonpos;
@@ -246,7 +245,7 @@ static void check_shape_forward(
std::ostringstream output_ss;
std::string separator = "";
-for (int i = 0, len = input_shape.size(); i < len; ++i) {
+for (size_t i = 0, len = input_shape.size(); i < len; ++i) {
input_ss << separator << input_shape[i];
kernel_ss << separator << kernel_shape[i];
separator = " x ";
@@ -496,8 +495,8 @@ Tensor _convolution_out(
// (padding_left, padding_right,
// padding_top, padding_bottom,
// padding_front, padding_back)
-if (pad_nd.vec().size() > 0) {
-for (int i = 0; i < dim; ++i) {
+if (!pad_nd.vec().empty()) {
+for (int64_t i = 0; i < dim; ++i) {
padding_front_top_left[i] += pad_nd[2 * dim - 2 * i - 2]; // 4, 2, 0
padding_back_bottom_right[i] += pad_nd[2 * dim - 2 * i - 1]; // 5, 3, 1
}
@@ -628,8 +627,8 @@ std::tuple<Tensor, Tensor, Tensor> convolution_backward_overrideable(
Tensor grad_output_, input_, weight_;
IntArrayRef stride_, padding_, dilation_, output_padding_;
-bool transposed_;
-int64_t groups_;
+bool transposed_ = false;
+int64_t groups_ = 0;
ConvParams params;
if (3 == ndim) {
grad_output_ = view4d(grad_output);
@@ -744,5 +743,4 @@ TORCH_LIBRARY_IMPL(aten, XPU, m) {
TORCH_FN(convolution_backward_overrideable));
}
-} // namespace xpu
-} // namespace at::native
+} // namespace at::native::xpu
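
The convolution hunks apply a handful of recurring clang-tidy-17 fixes: a C++17 nested namespace definition, in-class default member initializers, auto in range-for loops so the loop variable matches the int64_t element type, size-typed loop counters, and empty() instead of size() comparisons. A condensed sketch of the resulting style under those assumptions (Params is a simplified stand-in, not the real ConvParams):

#include <cstdint>
#include <vector>

namespace example::nested {  // C++17 nested namespace definition

struct Params {
  std::vector<int64_t> stride;
  std::vector<int64_t> padding;
  // Default member initializers give the scalar fields a defined value
  // and quiet member-initialization warnings.
  bool transposed{};
  int64_t groups{};
  bool benchmark{};
  bool deterministic{};

  bool is_strided() const {
    bool strided = false;
    // `auto` deduces int64_t from the container, avoiding an implicit
    // narrowing conversion to int.
    for (auto s : stride) {
      strided |= (s != 1);
    }
    return strided;
  }

  bool is_padded() const {
    // empty() expresses the intent of size() > 0 directly.
    return !padding.empty();
  }
};

}  // namespace example::nested

int main() {
  example::nested::Params p;
  p.stride = {2, 2};
  // A size-typed index avoids the signed/unsigned mismatch that an
  // `int i` counter compared against size() would introduce.
  int64_t sum = 0;
  for (size_t i = 0; i < p.stride.size(); ++i) {
    sum += p.stride[i];
  }
  return (p.is_strided() && !p.is_padded() && sum == 4) ? 0 : 1;
}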

View File

@@ -114,7 +114,7 @@ dnnl::memory::dims get_onednn_strides(const at::Tensor& tensor) {
}
dnnl::memory::desc get_onednn_md(const at::Tensor& tensor) {
-Tensor t = tensor.sizes().size() == 0 ? tensor.unsqueeze(0) : tensor;
+Tensor t = tensor.sizes().empty() ? tensor.unsqueeze(0) : tensor;
return {get_onednn_dims(t), get_onednn_dtype(t), get_onednn_strides(t)};
}
@@ -130,8 +130,8 @@ bool onednn_strides_check(const Tensor& src) {
dnnl_memory_desc_t md;
dnnl_memory_desc_create_with_strides(&md, ndims, dims, data_type, strides);
dnnl_format_kind_t md_fmt_kind;
-int md_ndims;
-int md_inner_nblks;
+int md_ndims = 0;
+int md_inner_nblks = 0;
dnnl_dims_t* md_padded_dims = nullptr;
dnnl_memory_desc_query(md, dnnl_query_inner_nblks_s32, &md_inner_nblks);
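
The hunk above initializes locals that are only written through pointers handed to the oneDNN C API; if a query were to fail, the values would otherwise be read uninitialized. A generic sketch of the idea (query_int is a made-up stand-in, not part of the dnnl API):

#include <cstdio>

// A C-style query that reports through an out-parameter and may fail
// without writing to it, mirroring the shape of the dnnl_* query calls.
static int query_int(bool ok, int* out) {
  if (!ok) {
    return -1;  // failure: *out is left untouched
  }
  *out = 3;
  return 0;
}

int main() {
  // Initializing up front keeps the later reads well defined even on
  // the failure path, which is what the uninitialized-variable warning
  // is about.
  int md_ndims = 0;
  int md_inner_nblks = 0;
  query_int(true, &md_ndims);
  query_int(false, &md_inner_nblks);
  std::printf("%d %d\n", md_ndims, md_inner_nblks);  // prints "3 0"
  return 0;
}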

View File

@@ -33,6 +33,8 @@ struct TORCH_XPU_API GpuEngineManager {
GpuEngineManager(GpuEngineManager const&) = delete;
GpuEngineManager& operator=(GpuEngineManager const&) = delete;
+GpuEngineManager(GpuEngineManager&&) = default;
+GpuEngineManager& operator=(GpuEngineManager&&) = default;
protected:
GpuEngineManager() {
@@ -44,7 +46,7 @@ struct TORCH_XPU_API GpuEngineManager {
c10::xpu::get_raw_device(i), c10::xpu::get_device_context())));
}
}
-~GpuEngineManager() {}
+~GpuEngineManager() = default;
private:
std::vector<std::shared_ptr<dnnl::engine>> engine_pool;
@@ -71,6 +73,8 @@ struct TORCH_XPU_API GpuStreamManager {
GpuStreamManager(GpuStreamManager const&) = delete;
GpuStreamManager& operator=(GpuStreamManager const&) = delete;
+GpuStreamManager(GpuStreamManager&&) = default;
+GpuStreamManager& operator=(GpuStreamManager&&) = default;
protected:
GpuStreamManager() {
@@ -78,7 +82,7 @@ struct TORCH_XPU_API GpuStreamManager {
TORCH_INTERNAL_ASSERT(device_count > 0);
stream_pool.resize(device_count);
}
-~GpuStreamManager() {}
+~GpuStreamManager() = default;
private:
using stream_hash_map =
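
The manager classes above gain explicitly defaulted move operations and = default destructors in place of empty bodies, so all five special member functions are now spelled out. A small sketch of that rule-of-five shape (ResourceManager is an illustrative name, not the PyTorch class):

struct ResourceManager {
  static ResourceManager& instance() {
    static ResourceManager mgr;  // lazily constructed singleton
    return mgr;
  }

  // Copying is forbidden, moving is explicitly defaulted, and the
  // destructor uses `= default` instead of an empty body `{}`; stating
  // all five keeps the special-member-function checks quiet.
  ResourceManager(ResourceManager const&) = delete;
  ResourceManager& operator=(ResourceManager const&) = delete;
  ResourceManager(ResourceManager&&) = default;
  ResourceManager& operator=(ResourceManager&&) = default;
  ~ResourceManager() = default;

 protected:
  ResourceManager() = default;
};

int main() {
  ResourceManager& mgr = ResourceManager::instance();
  (void)mgr;
  return 0;
}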

View File

@@ -20,7 +20,7 @@ using namespace torch;
PyObject* THPGeneratorClass = nullptr;
-PyObject* THPGenerator_initDefaultGenerator(at::Generator cdata) {
+PyObject* THPGenerator_initDefaultGenerator(const at::Generator& cdata) {
auto type = (PyTypeObject*)THPGeneratorClass;
auto self = THPObjectPtr{type->tp_alloc(type, 0)};
if (!self)

View File

@@ -14,7 +14,7 @@ struct THPGenerator {
// is borrowed. The caller should ensure that the at::Generator object lifetime
// last at least as long as the Python wrapper.
TORCH_PYTHON_API PyObject* THPGenerator_initDefaultGenerator(
-at::Generator cdata);
+const at::Generator& cdata);
#define THPGenerator_Check(obj) PyObject_IsInstance(obj, THPGeneratorClass)
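
The last two hunks change THPGenerator_initDefaultGenerator to take at::Generator by const reference; the Generator is a reference-counted handle, so passing it by value copies the handle on every call. A generic illustration of the difference (Handle is a stand-in type, not at::Generator):

#include <cstdio>
#include <memory>

// Stand-in for a cheap-to-hold but reference-counted handle type.
struct Handle {
  std::shared_ptr<int> state = std::make_shared<int>(0);
};

// By-value parameter: each call copies the handle and bumps the
// reference count, which performance-unnecessary-value-param flags.
static long use_by_value(Handle h) {
  return h.state.use_count();
}

// Const reference: no copy, no reference-count traffic.
static long use_by_cref(const Handle& h) {
  return h.state.use_count();
}

int main() {
  Handle h;
  std::printf("%ld %ld\n", use_by_value(h), use_by_cref(h));  // "2 1"
  return 0;
}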