[BE] Use C10_UNUSED (#135914)

Instead of `(void)foo; // Suppress unused variable`

Pull Request resolved: https://github.com/pytorch/pytorch/pull/135914
Approved by: https://github.com/huydhn, https://github.com/eqy
committed by PyTorch MergeBot
parent 062681a0ed
commit 1c04cbfba6
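For readers unfamiliar with the macro: `C10_UNUSED` is defined in `c10/macros/Macros.h` and, on GCC/Clang builds, expands to roughly `__attribute__((__unused__))` (MSVC gets its own spelling), telling the compiler the entity is intentionally unused. Below is a minimal sketch of the before/after idioms, assuming a translation unit that can see the c10 headers; `old_style` and `new_style` are invented names, not functions from this patch:

```cpp
#include <c10/macros/Macros.h>
#include <c10/util/irange.h>

// Old idiom: declare the loop variable, then void-cast it in the body to
// silence -Wunused-variable.
int old_style(int begin, int end) {
  int count = 0;
  for (const auto z : c10::irange(begin, end)) {
    (void)z; // Suppress unused variable
    ++count;
  }
  return count;
}

// New idiom: annotate the declaration itself, so the intent is visible where
// the variable is introduced and the extra statement disappears.
int new_style(int begin, int end) {
  int count = 0;
  for (C10_UNUSED auto z : c10::irange(begin, end)) {
    ++count;
  }
  return count;
}
```

The same annotation also works on function parameters, which is what the CUDA hunks further down rely on.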
@@ -473,8 +473,7 @@ void cpu_flash_attention(
     scalar_t* transpose_buffer_ptr = transpose_buffer.get();
     std::unique_ptr<scalar_t[]> v_copy_buffer = std::make_unique<scalar_t[]>(ekvSplitSize * packb_size);
     scalar_t* v_copy_buffer_ptr = v_copy_buffer.get();
-    for (const auto z : c10::irange(begin, end)) {
-      (void)z; // Suppress unused variable
+    for (C10_UNUSED auto z : c10::irange(begin, end)) {
       n = l * kvSplitSize;
       int64_t kvBlockSize = std::min(kvSplitSize, kvSize - n);
       int64_t ekvBlockSize = kvBlockSize % 2 == 0 ? kvBlockSize : kvBlockSize + 1;
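Why the induction variable is dead here at all: `c10::irange(begin, end)` is only being used as a counted loop over the chunk that `at::parallel_for` hands to this thread, while the real block position lives in separately advanced indices (`l` and `n` above). A hedged, self-contained sketch of that shape, with an invented name (`sum_chunk`) and a simplified index update:

```cpp
#include <algorithm>
#include <cstdint>
#include <c10/macros/Macros.h>
#include <c10/util/irange.h>

// The range variable z only counts work items; the kv-block offset n comes
// from an index l that the loop body advances itself, so z is never read.
int64_t sum_chunk(int64_t begin, int64_t end, int64_t kvSplitSize, int64_t kvSize) {
  int64_t l = 0; // placeholder: really recovered from the flat task index
  int64_t total = 0;
  for (C10_UNUSED auto z : c10::irange(begin, end)) {
    int64_t n = l * kvSplitSize;
    total += std::min(kvSplitSize, kvSize - n); // stand-in for the real block work
    ++l;
  }
  return total;
}
```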
@@ -567,8 +566,7 @@ void cpu_flash_attention(
         ? query_padding_ptr + ompIdx * qSplitSize * eheadSize
         : nullptr;
 
-    for (const auto z : c10::irange(begin, end)) {
-      (void)z; // Suppress unused variable
+    for (C10_UNUSED auto z : c10::irange(begin, end)) {
       int64_t m = k * qSplitSize;
       int64_t qBlockSize = std::min(qSplitSize, qSize - m);
       // Initialize max and sum
@@ -933,8 +931,7 @@ void cpu_flash_attention_backward(
 
     at::Tensor dsum = at::empty({qSplitSize}, query.options().dtype(accumulate_dtype));
    accum_t* dsum_data = dsum.data_ptr<accum_t>();
-    for (const auto z : c10::irange(begin, end)) {
-      (void)z; // Suppress unused variable
+    for (C10_UNUSED auto z : c10::irange(begin, end)) {
       // rowsum of grad_out * out
       for (int64_t m = 0; m < qSize; m += qSplitSize) {
         int64_t qBlockSize = std::min(qSplitSize, qSize - m);
@@ -177,12 +177,11 @@ struct KthValueLauncher {
       cuda::detail::TensorInfo<scalar_t, index_t> values_info,
       int collapse_values_dim,
       cuda::detail::TensorInfo<int64_t, index_t> indices_info,
-      int collapse_indices_dim,
+      C10_UNUSED int collapse_indices_dim,
       cuda::detail::TensorInfo<const scalar_t, index_t> self_info,
       int collapse_self_dim,
       int64_t num_slices,
       int64_t slice_size) {
-    (void)collapse_indices_dim; // Suppress unused variable warning
     dim3 grid;
     if (!getGridFromTiles(num_slices, grid)) {
       AT_ERROR("slices are too many");
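In `KthValueLauncher` (and `MedianLauncher` below) the unused entity is a function parameter rather than a loop variable, so the annotation moves onto the parameter declaration and the `(void)` cast in the body goes away. A toy sketch, not code from the patch, with invented names; in plain C++17 the standard `[[maybe_unused]]` attribute plays the same role:

```cpp
#include <c10/macros/Macros.h>

// A parameter kept only to satisfy a shared call signature can be marked
// unused right where it is declared.
inline int launch_like(int used_dim, C10_UNUSED int unused_dim) {
  return used_dim * 2; // unused_dim is never read; no (void) cast needed
}

// Equivalent standard C++17 spelling, for comparison:
inline int launch_like_std(int used_dim, [[maybe_unused]] int unused_dim) {
  return used_dim * 2;
}
```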
@@ -213,15 +212,13 @@ struct MedianLauncher {
   template <typename scalar_t, typename index_t, int all_dims>
   inline void launch(
       cuda::detail::TensorInfo<scalar_t, index_t> values_info,
-      int collapse_values_dim,
+      C10_UNUSED int collapse_values_dim,
       cuda::detail::TensorInfo<int64_t, index_t> indices_info,
-      int collapse_indices_dim,
+      C10_UNUSED int collapse_indices_dim,
       cuda::detail::TensorInfo<const scalar_t, index_t> self_info,
       int collapse_self_dim,
       int64_t num_slices,
       int64_t slice_size) {
-    (void)collapse_values_dim; // Suppress unused variable warning
-    (void)collapse_indices_dim; // Suppress unused variable warning
     dim3 grid;
     if (!getGridFromTiles(num_slices, grid)) {
       AT_ERROR("slices are too many");
@@ -904,8 +904,7 @@ std::tuple<Tensor, Tensor, Tensor> layer_norm_mps(const Tensor& input,
   for (const auto idx : c10::irange(axis)) {
     stat_shape.push_back(input_shape[idx]);
   }
-  for (const auto idx : c10::irange(axis, input.dim())) {
-    (void)idx; // Suppress unused variable
+  for (C10_UNUSED auto idx : c10::irange(axis, input.dim())) {
     stat_shape.push_back(1);
   }
   mean = mean.view(stat_shape);
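For context on the MPS hunk: the two loops build the shape used to view the layer-norm statistics, keeping the leading (batch) dimensions and collapsing every normalized dimension to 1 so that `mean.view(stat_shape)` broadcasts back over the input; the second loop's index is never read, hence `C10_UNUSED`. A self-contained sketch with an invented helper name:

```cpp
#include <cstdint>
#include <vector>
#include <c10/macros/Macros.h>
#include <c10/util/irange.h>

// Keep dims before `axis` as-is, collapse the normalized dims to 1 so the
// statistics broadcast against the original input when viewed.
std::vector<int64_t> make_stat_shape(const std::vector<int64_t>& input_shape, int64_t axis) {
  std::vector<int64_t> stat_shape;
  for (const auto idx : c10::irange(axis)) {
    stat_shape.push_back(input_shape[idx]); // leading (batch) dims kept
  }
  for (C10_UNUSED auto idx : c10::irange(axis, static_cast<int64_t>(input_shape.size()))) {
    stat_shape.push_back(1); // normalized dims collapsed for broadcasting
  }
  return stat_shape;
}
// e.g. input_shape = {8, 16, 32}, axis = 1  ->  stat_shape = {8, 1, 1}
```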