vllm-ascend/csrc/mla_preprocess/op_host/tiling/mla_preprocess_tiling.h
Chen Chen bcc313e8f2 add mla_preprocess kernel (#3226)
### What this PR does / why we need it?

- Adds the `mla_preprocess` custom kernel, an optimized pre-processing
operator for Multi-head Latent Attention (MLA) on Ascend NPUs.
- Wires the new kernel into the C++ extension pipeline so vLLM can
invoke it directly, cutting the Python-side tensor shuffling and memory
copies that previously bottlenecked the MLA computation path (see the
sketch below).
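
For context, a minimal sketch of how a custom kernel is typically exposed
through a PyTorch C++ extension. The names here (`_C_ascend`,
`mla_preprocess_impl`, the two-tensor signature) are hypothetical
placeholders, not the PR's actual binding:

```cpp
#include <ATen/ATen.h>
#include <torch/library.h>

// Hypothetical host-side wrapper: compute tiling, allocate workspace,
// launch the NPU kernel, and return the preprocessed tensor.
at::Tensor mla_preprocess_impl(const at::Tensor &hidden_states,
                               const at::Tensor &weights) {
  // ... tiling computation and kernel launch elided ...
  return at::empty_like(hidden_states);
}

// Registration makes the op callable from Python as
// torch.ops._C_ascend.mla_preprocess(hidden_states, weights),
// bypassing Python-side pre-processing on the hot path.
TORCH_LIBRARY(_C_ascend, m) {
  m.def("mla_preprocess", &mla_preprocess_impl);
}
```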

### Does this PR introduce any user-facing change?

- No. The change only introduces a low-level kernel; public APIs and
inference behavior remain unchanged.

### How was this patch tested?

- Dedicated Ascend kernels are not covered by our CI yet, so no extra
automated tests were added. Future MLA-focused regression runs will
cover this path.

- vLLM version: v0.11.0

Signed-off-by: Chen Chen <0109chenchen@gmail.com>
2025-10-12 07:39:45 +08:00


// Adapted from
// https://gitee.com/ascend/ascend-transformer-boost
//
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
// This file is a part of the CANN Open Software.
// Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
// Please refer to the License for details. You may not use this file except in compliance with the License.
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
// See LICENSE in the root of the software repository for the full text of the License.
//
#ifndef MLAPREPROCESS_TILING_H
#define MLAPREPROCESS_TILING_H
#include <cstdint>
struct PpMatmulTilingData {
    uint32_t numBatch{0};      // number of independent matmuls in the batch
    uint32_t m{0};             // problem shape: C[m, n] = A[m, k] * B[k, n]
    uint32_t k{0};
    uint32_t n{0};
    uint32_t m0{0};            // tile sizes along m / k / n
    uint32_t k0{0};
    uint32_t n0{0};
    uint32_t mLoop{0};         // loop counts over the m / k / n tiles
    uint32_t kLoop{0};
    uint32_t nLoop{0};
    uint32_t coreLoop{0};      // total tile iterations distributed across cores
    uint32_t swizzleCount{0};  // block-swizzle settings for tile traversal order
    uint32_t swizzleDirect{0};
    uint32_t enShuffleK{0};    // enable shuffled traversal of the k dimension
    uint32_t blockDim{0};      // number of AI cores the kernel launches on
    uint32_t enLoadAllAmat{0}; // enable loading the whole A matrix up front
    uint32_t b0matPingPongBufferLen{0}; // ping-pong buffer length for B tiles
};
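
// Illustration only (an assumption, not code from this PR): a host-side
// tiling routine would typically derive the loop fields from the problem
// and tile sizes by ceil-division, e.g.
//
//   tiling.mLoop    = (m + m0 - 1) / m0;
//   tiling.kLoop    = (k + k0 - 1) / k0;
//   tiling.nLoop    = (n + n0 - 1) / n0;
//   tiling.coreLoop = numBatch * tiling.mLoop * tiling.nLoop;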
struct MlaTilingData {
    uint32_t tilingKey{0};          // selects the kernel code path
    uint64_t userWorkspaceSize{0};  // total device workspace requested
    uint64_t s1Offset{0};           // byte offsets of intermediate buffers
    uint64_t s2Offset{0};           // inside the user workspace
    uint64_t s3Offset{0};
    uint64_t s4Offset{0};
    uint64_t s5Offset{0};
    uint32_t numCore{0};
    uint32_t n{0};
    uint32_t perTaskNum{0};         // tasks per core, plus a remainder
    uint32_t resTaskNum{0};
    PpMatmulTilingData mm1;         // tilings for the three matmul stages
    PpMatmulTilingData mm2;
    PpMatmulTilingData mm3;
    // rms1: first RmsNorm + quantization stage
    uint32_t rmsNumCore1{0};
    uint32_t rmsNumCol1{0};
    uint32_t rmsNumRow1{0};
    uint32_t rmsQuantMin1{0};
    // rms2: second RmsNorm + quantization stage
    uint32_t rmsNumCore2{0};
    uint32_t rmsNumCol2{0};
    uint32_t rmsNumRow2{0};
    uint32_t rmsQuantMin2{0};
    // rotary embedding / concat parameters
    uint32_t hiddenSizeQ{0};
    uint32_t headNumQ{0};
    uint32_t headDim{0};
    uint32_t concatSize{0};
    uint32_t rotaryCoeff{0};
    uint32_t ntokens{0};
    // per-core work split (UB = Unified Buffer on Ascend)
    uint32_t realCore{0};
    uint32_t nlCoreRun{0};          // rows handled by each non-last core
    uint32_t lCoreRun{0};           // rows handled by the last core
    uint32_t maxNPerLoopForUb{0};   // max n processed per UB loop
    uint32_t preCoreLoopTime{0};    // loop counts and tail sizes for the
    uint32_t preCoreLoopNLast{0};   // front cores and the last core
    uint32_t lastCoreLoopTime{0};
    uint32_t lastCoreLoopNLast{0};
    // EinSumQuant stage
    uint32_t esqFrontCore{0};       // front/tail core split
    uint32_t esqTailCore{0};
    uint32_t esqFrontCoreBatch{0};  // batches handled per front/tail core
    uint32_t esqTailCoreBatch{0};
    uint32_t esqHeadNum{0};
    uint32_t esqColNum{0};
    uint32_t esqUbHeadLoop{0};      // UB loop split over heads and columns
    uint32_t esqHeadPerLoop{0};
    uint32_t esqHeadTail{0};
    uint32_t esqColLoop{0};
    uint32_t esqColTail{0};
};
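
// Illustration only (assumed usage, not code from this PR): the host fills
// an MlaTilingData instance and copies it to device memory before launching
// the kernel. ComputeMlaTiling is a hypothetical helper name; aclrtMemcpy is
// the standard ACL runtime copy call.
//
//   MlaTilingData tiling;
//   ComputeMlaTiling(params, tiling);
//   aclrtMemcpy(tilingDev, sizeof(tiling), &tiling, sizeof(tiling),
//               ACL_MEMCPY_HOST_TO_DEVICE);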
#endif // MLAPREPROCESS_TILING_H