Mirror of https://github.com/vllm-project/vllm-ascend.git
### What this PR does / why we need it?
- Adds the `mla_preprocess` custom kernel to provide an optimized pre-processing operator for Multi-head Latent Attention (MLA) on Ascend NPUs.
- Wires the new kernel into the C++ extension pipeline so vLLM can invoke it directly, cutting the Python-side tensor shuffling and memory copies that previously bottlenecked MLA compilation paths.

### Does this PR introduce any user-facing change?
- No. The change only introduces a low-level kernel; public APIs and inference behavior remain unchanged.

### How was this patch tested?
- Dedicated Ascend kernels are not covered by our CI yet, so no extra automated tests were added. Future MLA-focused regression runs will cover this path.

- vLLM version: v0.11.0

Signed-off-by: Chen Chen <0109chenchen@gmail.com>
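For context on the "wires the new kernel into the C++ extension pipeline" point: a kernel like this usually becomes reachable from vLLM once it is registered as a custom PyTorch op in the extension. Below is a minimal sketch of what that wiring can look like; the namespace `_C_ascend`, the schema, and `mla_preprocess_impl` are hypothetical placeholders, not the symbols actually added by this PR.

```cpp
// Illustrative only: exposing an MLA pre-processing kernel to vLLM via the
// PyTorch custom-op mechanism. Names and signatures are assumptions.
#include <torch/library.h>
#include <ATen/ATen.h>

namespace {

at::Tensor mla_preprocess_impl(const at::Tensor& hidden_states, const at::Tensor& weights) {
  // Real code would launch the Ascend kernel here; this stub only allocates an output.
  return at::empty_like(hidden_states);
}

}  // namespace

// Declare the op schema once...
TORCH_LIBRARY(_C_ascend, m) {
  m.def("mla_preprocess(Tensor hidden_states, Tensor weights) -> Tensor");
}

// ...and register the NPU implementation (torch_npu devices dispatch via PrivateUse1).
TORCH_LIBRARY_IMPL(_C_ascend, PrivateUse1, m) {
  m.impl("mla_preprocess", &mla_preprocess_impl);
}
```

With a registration of this shape, Python code can call the op as `torch.ops._C_ascend.mla_preprocess(...)`, which is what lets vLLM skip the Python-side tensor shuffling mentioned above.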
68 lines
3.7 KiB
C++
/* Adapted from
 * https://gitee.com/ascend/ascend-transformer-boost.git
 *
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef INCLUDE_MMA_H
#define INCLUDE_MMA_H

#include "hardware.h"
#include "kernel_tensor.h"
// Primary template: empty fallback. The partial specialization below performs the
// actual AscendC::Mmad call.
template <ArchType ArchTag, typename ElementA, typename ElementB, typename AccDTypeC, bool IsTransposeA>
struct mmad {
    __aicore__ mmad(AscendC::LocalTensor<AccDTypeC> l0cTensor, AscendC::LocalTensor<ElementA> l0aTensor,
                    AscendC::LocalTensor<ElementB> l0bTensor, uint32_t mTileActual, uint32_t nTileActual,
                    uint32_t kPartActual, bool initC, uint8_t unitFlag = 0) {};

    __aicore__ mmad(AscendC::LocalTensor<AccDTypeC> l0cTensor, AscendC::LocalTensor<ElementA> l0aTensor,
                    AscendC::LocalTensor<ElementB> l0bTensor, uint64_t biasBt, uint32_t mTileActual,
                    uint32_t nTileActual, uint32_t kPartActual, bool initC, uint8_t unitFlag = 0) {};
};
|
// Partial specialization for the non-transposed-A case (IsTransposeA = false).
template <ArchType ArchTag, typename AccDTypeC, typename ElementA, typename ElementB>
struct mmad<ArchTag, ElementA, ElementB, AccDTypeC, false> {
    // Plain matrix-multiply-accumulate: initC selects whether L0C is initialised
    // or accumulated into (cmatrixInitVal).
    __aicore__ mmad(AscendC::LocalTensor<AccDTypeC> l0cTensor, AscendC::LocalTensor<ElementA> l0aTensor,
                    AscendC::LocalTensor<ElementB> l0bTensor, uint32_t mTileActual, uint32_t nTileActual,
                    uint32_t kPartActual, bool initC, uint8_t unitFlag = 0)
    {
        AscendC::Mmad(l0cTensor,                          // C
                      l0aTensor,                          // A
                      l0bTensor,                          // B
                      AscendC::MmadParams(mTileActual,    // m
                                          nTileActual,    // n
                                          kPartActual,    // k
                                          unitFlag,       // unitFlag
                                          false,          // cmatrixSource
                                          initC));        // cmatrixInitVal
    };

    // Bias variant: the bias table address is wrapped in a LocalTensor placed at
    // TPosition::C2 and used as the C-matrix source (cmatrixSource = true).
    __aicore__ mmad(AscendC::LocalTensor<AccDTypeC> l0cTensor, AscendC::LocalTensor<ElementA> l0aTensor,
                    AscendC::LocalTensor<ElementB> l0bTensor, uint64_t biasBt, uint32_t mTileActual,
                    uint32_t nTileActual, uint32_t kPartActual, bool initC, uint8_t unitFlag = 0)
    {
        AscendC::LocalTensor<AccDTypeC> biasTensor;
        biasTensor.InitBuffer(biasBt, nTileActual);
        biasTensor.address_.logicPos = static_cast<uint8_t>(AscendC::TPosition::C2);
        AscendC::Mmad(l0cTensor,                          // C
                      l0aTensor,                          // A
                      l0bTensor,                          // B
                      biasTensor,                         // bt
                      AscendC::MmadParams(mTileActual,    // m
                                          nTileActual,    // n
                                          kPartActual,    // k
                                          unitFlag,       // unitFlag
                                          true,           // cmatrixSource
                                          false));        // cmatrixInitVal
    };
};
#endif
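For orientation, here is a rough sketch of how the `mmad` wrapper above is typically driven from within an AscendC kernel, assuming the L0A/L0B/L0C tiles have already been loaded by the surrounding tiling code. The function name, include path, and dtype choices are illustrative assumptions, not code from this repository.

```cpp
// Hypothetical call site; relies only on the mmad wrapper defined in the header above.
#include "mma.h"  // include path assumed

template <ArchType ArchTag>
__aicore__ inline void MmadStep(AscendC::LocalTensor<float> l0c,
                                AscendC::LocalTensor<half> l0a,
                                AscendC::LocalTensor<half> l0b,
                                uint32_t mTile, uint32_t nTile, uint32_t kPart,
                                bool firstKIteration)
{
    // IsTransposeA = false picks the partial specialization that forwards to AscendC::Mmad.
    // Passing initC = true on the first K iteration initialises L0C instead of accumulating.
    mmad<ArchTag, half, half, float, false>(l0c, l0a, l0b, mTile, nTile, kPart, firstKIteration);
}
```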