#include <torch/all.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAException.h>

#include "cuda_utils.h"
#include "cuda_compat.h"
#include "dispatch_utils.h"
#include "quantization/vectorization_utils.cuh"

#ifdef USE_ROCM
  #include "quantization/w8a8/fp8/amd/quant_utils.cuh"
#else
  #include "quantization/w8a8/fp8/nvidia/quant_utils.cuh"
#endif

#include <algorithm>
#include <cassert>
#include <cfloat>

#ifdef USE_ROCM
  #include <hip/hip_bf16.h>
typedef __hip_bfloat16 __nv_bfloat16;
#endif

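// Copies whole cache blocks between src and dst, which may live on different
// devices. block_mapping is a CPU tensor of shape [num_pairs, 2] holding
// (src_block_number, dst_block_number) pairs; each pair is issued as one
// cudaMemcpyAsync on the current stream.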
void swap_blocks(torch::Tensor& src, torch::Tensor& dst,
                 const torch::Tensor& block_mapping) {
  torch::Device src_device = src.device();
  torch::Device dst_device = dst.device();
  cudaMemcpyKind memcpy_type;
  if (src_device.is_cuda() && dst_device.is_cuda()) {
    TORCH_CHECK(src_device.index() == dst_device.index(),
                "src and dst must be on the same GPU");
    memcpy_type = cudaMemcpyDeviceToDevice;
  } else if (src_device.is_cuda() && dst_device.is_cpu()) {
    memcpy_type = cudaMemcpyDeviceToHost;
  } else if (src_device.is_cpu() && dst_device.is_cuda()) {
    memcpy_type = cudaMemcpyHostToDevice;
  } else {
    TORCH_CHECK(false, "Invalid device combination");
  }

  // NOTE(youkaichao): keep in mind that `block_mapping` should be
  // a cpu tensor, otherwise every `item` call will require a gpu-cpu
  // synchronization.
  TORCH_CHECK(block_mapping.device().is_cpu(), "block_mapping must be on CPU");

  char* src_ptr = static_cast<char*>(src.data_ptr());
  char* dst_ptr = static_cast<char*>(dst.data_ptr());

  // We use the stride instead of numel in case the cache is padded for memory
  // alignment reasons; we assume the block data (inclusive of any padding)
  // is contiguous in memory.
  const int64_t block_size_in_bytes = src.element_size() * src.stride(0);
  const at::cuda::OptionalCUDAGuard device_guard(
      src_device.is_cuda() ? src_device : dst_device);
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // NOTE(woosuk): This can be slow if the number of blocks is large.
  const int64_t num_blocks = block_mapping.size(0);
  for (size_t i = 0; i < num_blocks; i++) {
    int64_t src_block_number = block_mapping[i][0].item<int64_t>();
    int64_t dst_block_number = block_mapping[i][1].item<int64_t>();
    int64_t src_offset = src_block_number * block_size_in_bytes;
    int64_t dst_offset = dst_block_number * block_size_in_bytes;
    cudaMemcpyAsync(dst_ptr + dst_offset, src_ptr + src_offset,
                    block_size_in_bytes, memcpy_type, stream);
  }
}

namespace vllm {

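// Copies whole blocks within each layer's key/value cache on the device.
// block_mapping is flattened to [2 * num_pairs] (src, dst) block numbers;
// each thread block handles one (layer, pair) combination.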
// Grid: (num_layers, num_pairs)
template <typename scalar_t>
__global__ void copy_blocks_kernel(int64_t* key_cache_ptrs,
                                   int64_t* value_cache_ptrs,
                                   const int64_t* __restrict__ block_mapping,
                                   const int numel_per_block) {
  const int layer_idx = blockIdx.x;
  const int pair_idx = blockIdx.y;

  scalar_t* key_cache = reinterpret_cast<scalar_t*>(key_cache_ptrs[layer_idx]);
  scalar_t* value_cache =
      reinterpret_cast<scalar_t*>(value_cache_ptrs[layer_idx]);
  int64_t src_block_number = block_mapping[2 * pair_idx];
  int64_t dst_block_number = block_mapping[2 * pair_idx + 1];

  const int64_t src_block_offset = src_block_number * numel_per_block;
  const int64_t dst_block_offset = dst_block_number * numel_per_block;
  for (int i = threadIdx.x; i < numel_per_block; i += blockDim.x) {
    int64_t src_offset = src_block_offset + i;
    int64_t dst_offset = dst_block_offset + i;
    key_cache[dst_offset] = key_cache[src_offset];
  }
  for (int i = threadIdx.x; i < numel_per_block; i += blockDim.x) {
    int64_t src_offset = src_block_offset + i;
    int64_t dst_offset = dst_block_offset + i;
    value_cache[dst_offset] = value_cache[src_offset];
  }
}

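// For MLA the keys and values share a single joint cache per layer, so one
// base pointer per layer is enough. The per-block footprint is passed in
// elements (kv_caches[i].stride(0)), so any alignment padding between blocks
// is copied along with the data.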
// Kernel for MLA, which works on a single joint kv_cache
// Grid: (num_layers, num_pairs)
template <typename scalar_t>
__global__ void copy_blocks_mla_kernel(
    int64_t* cache_ptrs, const int64_t* __restrict__ block_mapping,
    const int mem_footprint_per_block) {
  const int layer_idx = blockIdx.x;
  const int pair_idx = blockIdx.y;
  scalar_t* cache = reinterpret_cast<scalar_t*>(cache_ptrs[layer_idx]);
  int64_t src_block = block_mapping[2 * pair_idx];
  int64_t dst_block = block_mapping[2 * pair_idx + 1];
  int64_t src_offset = src_block * mem_footprint_per_block;
  int64_t dst_offset = dst_block * mem_footprint_per_block;
  for (int i = threadIdx.x; i < mem_footprint_per_block; i += blockDim.x) {
    cache[dst_offset + i] = cache[src_offset + i];
  }
}

}  // namespace vllm

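// Host-side launcher for copy_blocks_kernel: packs the per-layer cache base
// pointers into int64 tensors, moves them to the cache device, and launches
// one thread block per (layer, block pair).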
// Note: the key_caches and value_caches vectors are constant but
// not the Tensors they contain. The vectors need to be const refs
// in order to satisfy pytorch's C++ operator registration code.
void copy_blocks(std::vector<torch::Tensor> const& key_caches,
                 std::vector<torch::Tensor> const& value_caches,
                 const torch::Tensor& block_mapping) {
  int num_layers = key_caches.size();
  TORCH_CHECK(num_layers == value_caches.size());
  if (num_layers == 0) {
    return;
  }
  torch::Device cache_device = key_caches[0].device();
  TORCH_CHECK(cache_device.is_cuda());

  // Create data structures for the kernel.
  // Create an array of pointers to the key and value caches.
  int64_t key_cache_ptrs[num_layers];
  int64_t value_cache_ptrs[num_layers];
  for (int layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
    key_cache_ptrs[layer_idx] =
        reinterpret_cast<int64_t>(key_caches[layer_idx].data_ptr());
    value_cache_ptrs[layer_idx] =
        reinterpret_cast<int64_t>(value_caches[layer_idx].data_ptr());
  }

  // block_mapping is a 2D tensor with shape (num_pairs, 2).
  int num_pairs = block_mapping.size(0);

  // Move the data structures to the GPU.
  // NOTE: This synchronizes the CPU and GPU.
  torch::Tensor key_cache_ptrs_tensor =
      torch::from_blob(key_cache_ptrs, {num_layers}, torch::kInt64)
          .to(cache_device);
  torch::Tensor value_cache_ptrs_tensor =
      torch::from_blob(value_cache_ptrs, {num_layers}, torch::kInt64)
          .to(cache_device);

  // Launch the kernel.
  const int numel_per_block = key_caches[0][0].numel();
  dim3 grid(num_layers, num_pairs);
  dim3 block(std::min(1024, numel_per_block));
  const at::cuda::OptionalCUDAGuard device_guard(cache_device);
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  VLLM_DISPATCH_FLOATING_AND_BYTE_TYPES(
      key_caches[0].scalar_type(), "copy_blocks_kernel", ([&] {
        vllm::copy_blocks_kernel<scalar_t><<<grid, block, 0, stream>>>(
            key_cache_ptrs_tensor.data_ptr<int64_t>(),
            value_cache_ptrs_tensor.data_ptr<int64_t>(),
            block_mapping.data_ptr<int64_t>(), numel_per_block);
      }));
}

// Host launcher for copy_blocks_mla_kernel (assumes a joint KV-cache)
void copy_blocks_mla(std::vector<torch::Tensor> const& kv_caches,
                     const torch::Tensor& block_mapping) {
  int num_layers = kv_caches.size();
  if (num_layers == 0) {
    return;
  }
  torch::Device cache_device = kv_caches[0].device();
  TORCH_CHECK(cache_device.is_cuda(), "kv_cache must be on CUDA");

  std::vector<int64_t> cache_ptrs(num_layers);
  for (int layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
    cache_ptrs[layer_idx] =
        reinterpret_cast<int64_t>(kv_caches[layer_idx].data_ptr());
  }
  torch::Tensor cache_ptrs_tensor =
      torch::from_blob(cache_ptrs.data(), {num_layers}, torch::kInt64)
          .to(cache_device);

  int num_pairs = block_mapping.size(0);
  // We use the stride instead of numel in case the cache is padded for memory
  // alignment reasons; we assume the block data (inclusive of any padding)
  // is contiguous in memory.
  int mem_footprint_per_block = kv_caches[0].stride(0);
  dim3 grid(num_layers, num_pairs);
  dim3 block(std::min(1024, mem_footprint_per_block));
  const at::cuda::OptionalCUDAGuard device_guard(cache_device);
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  VLLM_DISPATCH_FLOATING_AND_BYTE_TYPES(
      kv_caches[0].scalar_type(), "copy_blocks_mla_kernel", ([&] {
        vllm::copy_blocks_mla_kernel<scalar_t><<<grid, block, 0, stream>>>(
            cache_ptrs_tensor.data_ptr<int64_t>(),
            block_mapping.data_ptr<int64_t>(), mem_footprint_per_block);
      }));
}

namespace vllm {

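// Functor applied per element by vectorize_with_alignment<>: a plain cast for
// kAuto caches, or an fp8 scaled conversion using the provided scale.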
// Used to copy/convert one element
template <typename OutT, typename InT, Fp8KVCacheDataType kv_dt>
struct CopyWithScaleOp {
  float scale;

  __device__ __forceinline__ void operator()(OutT& dst, const InT src) const {
    if constexpr (kv_dt == Fp8KVCacheDataType::kAuto) {
      dst = static_cast<OutT>(src);
    } else {
      dst = fp8::scaled_convert<OutT, InT, kv_dt>(src, scale);
    }
  }
};

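// Writes one token's key/value vectors into the paged cache layout used by
// the original paged-attention kernel: key_cache is
// [num_blocks, num_heads, head_size/x, block_size, x] and value_cache is
// [num_blocks, num_heads, head_size, block_size]. One thread block per token;
// each thread handles one (head, x-chunk) pair.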
template <typename scalar_t, typename cache_t, Fp8KVCacheDataType kv_dt>
__global__ void reshape_and_cache_kernel(
    const scalar_t* __restrict__ key,    // [num_tokens, num_heads, head_size]
    const scalar_t* __restrict__ value,  // [num_tokens, num_heads, head_size]
    cache_t* __restrict__ key_cache,  // [num_blocks, num_heads, head_size/x,
                                      // block_size, x]
    cache_t* __restrict__ value_cache,  // [num_blocks, num_heads, head_size,
                                        // block_size]
    const int64_t* __restrict__ slot_mapping,  // [num_tokens]
    const int key_stride, const int value_stride, const int num_heads,
    const int head_size, const int block_size, const int x,
    const float* k_scale, const float* v_scale) {
  const int64_t token_idx = blockIdx.x;
  const int64_t slot_idx = slot_mapping[token_idx];
  if (slot_idx < 0) {
    return;
  }

  const int64_t block_idx = slot_idx / block_size;
  const int64_t block_offset = slot_idx % block_size;
  const int h_block_count = head_size / x;  // number of x-sized chunks per head

  const int h_block_idx = threadIdx.x;
  if (h_block_idx >= num_heads * h_block_count) {
    return;
  }

  const int head_idx = h_block_idx / h_block_count;
  const int h_block = h_block_idx % h_block_count;

  const scalar_t* __restrict__ key_src =
      key + token_idx * key_stride + head_idx * head_size + h_block * x;
  const int64_t src_value_start =
      token_idx * value_stride + head_idx * head_size + h_block * x;

  cache_t* __restrict__ key_dst =
      key_cache + block_idx * num_heads * h_block_count * block_size * x +
      head_idx * h_block_count * block_size * x + h_block * block_size * x +
      block_offset * x;
  const int64_t tgt_value_start =
      block_idx * num_heads * h_block_count * x * block_size +
      head_idx * h_block_count * x * block_size + h_block * x * block_size +
      block_offset;

  constexpr int VEC_SIZE = (sizeof(scalar_t) == 2) ? 8 : 4;
  float k_scale_val = (kv_dt == Fp8KVCacheDataType::kAuto) ? 0.f : *k_scale;
  CopyWithScaleOp<cache_t, scalar_t, kv_dt> k_op{k_scale_val};
  float v_scale_val = (kv_dt == Fp8KVCacheDataType::kAuto) ? 0.f : *v_scale;
  CopyWithScaleOp<cache_t, scalar_t, kv_dt> v_op{v_scale_val};

  vectorize_with_alignment<VEC_SIZE>(key_src, key_dst, x, 0, 1, k_op);

  const scalar_t* __restrict__ value_src = value + src_value_start;
  cache_t* __restrict__ value_dst = value_cache + tgt_value_start;
#pragma unroll
  for (int i = 0; i < x; i++) {
    v_op(value_dst[i * block_size], value_src[i]);
  }
}

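// Flash-attention style cache write. Both layouts are supported and are
// distinguished at runtime by head_stride: NHD
// ([num_blocks, block_size, num_heads, head_size], head_stride == head_size)
// and HND ([num_blocks, num_heads, block_size, head_size]), where each head's
// segment is contiguous but heads are strided.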
template <typename scalar_t, typename cache_t, Fp8KVCacheDataType kv_dt>
__global__ void reshape_and_cache_flash_kernel(
    const scalar_t* __restrict__ key,    // [num_tokens, num_heads, head_size]
    const scalar_t* __restrict__ value,  // [num_tokens, num_heads, head_size]
    cache_t* __restrict__ key_cache,    // NHD or HND, shape see comments below
    cache_t* __restrict__ value_cache,  // same above
    const int64_t* __restrict__ slot_mapping,  // [num_tokens]
    const int64_t block_stride, const int64_t page_stride,
    const int64_t head_stride, const int64_t key_stride,
    const int64_t value_stride, const int num_heads, const int head_size,
    const int block_size, const float* k_scale, const float* v_scale) {
  const int64_t token_idx = blockIdx.x;
  const int64_t slot_idx = slot_mapping[token_idx];
  // NOTE: slot_idx can be -1 if the token is padded
  if (slot_idx < 0) {
    return;
  }
  const int64_t block_idx = slot_idx / block_size;
  const int64_t block_offset = slot_idx % block_size;
  const int n_elems = num_heads * head_size;

  // pointers to the beginning of the source row for this token.
  const scalar_t* __restrict__ key_src = key + token_idx * key_stride;
  const scalar_t* __restrict__ value_src = value + token_idx * value_stride;

  // find the start position inside the kv-cache for this token.
  cache_t* __restrict__ key_dst =
      key_cache + block_idx * block_stride + block_offset * page_stride;
  cache_t* __restrict__ value_dst =
      value_cache + block_idx * block_stride + block_offset * page_stride;

  // this is true for the NHD layout where `head_stride == head_size`
  const bool is_contiguous_heads = (head_stride == head_size);

  float k_scale_val = (kv_dt == Fp8KVCacheDataType::kAuto) ? 0.f : *k_scale;
  float v_scale_val = (kv_dt == Fp8KVCacheDataType::kAuto) ? 0.f : *v_scale;
  constexpr int VEC_SIZE = (sizeof(scalar_t) == 2) ? 8 : 4;
  CopyWithScaleOp<cache_t, scalar_t, kv_dt> k_op{k_scale_val};
  CopyWithScaleOp<cache_t, scalar_t, kv_dt> v_op{v_scale_val};
  if (is_contiguous_heads) {
    // NHD layout
    // kv cache: [num_blocks, block_size, num_heads, head_size]
    vectorize_with_alignment<VEC_SIZE>(key_src, key_dst, n_elems, threadIdx.x,
                                       blockDim.x, k_op);

    vectorize_with_alignment<VEC_SIZE>(value_src, value_dst, n_elems,
                                       threadIdx.x, blockDim.x, v_op);

  } else {
    // HND layout: heads are strided, but each head_size segment is contiguous
    // kv cache: [num_blocks, num_heads, block_size, head_size]
    const int lane = threadIdx.x & 31;     // 0..31 within warp
    const int warp_id = threadIdx.x >> 5;  // warp index within block
    const int warps_per_block = blockDim.x >> 5;

    for (int head = warp_id; head < num_heads; head += warps_per_block) {
      const scalar_t* __restrict__ k_src_h = key_src + head * head_size;
      const scalar_t* __restrict__ v_src_h = value_src + head * head_size;

      cache_t* __restrict__ k_dst_h =
          key_dst + static_cast<int64_t>(head) * head_stride;
      cache_t* __restrict__ v_dst_h =
          value_dst + static_cast<int64_t>(head) * head_stride;

      // within each head, let the 32 threads of the warp perform the vector
      // copy
      vectorize_with_alignment<VEC_SIZE>(k_src_h, k_dst_h, head_size, lane, 32,
                                         k_op);

      vectorize_with_alignment<VEC_SIZE>(v_src_h, v_dst_h, head_size, lane, 32,
                                         v_op);
    }
  }
}

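// MLA cache write: concatenates one token's compressed KV vector (kv_c,
// kv_lora_rank elements) and its rotary embedding part (k_pe, pe_dim
// elements) into a single cache entry of kv_lora_rank + pe_dim elements.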
template <typename scalar_t, typename cache_t, Fp8KVCacheDataType kv_dt>
__global__ void concat_and_cache_mla_kernel(
    const scalar_t* __restrict__ kv_c,  // [num_tokens, kv_lora_rank]
    const scalar_t* __restrict__ k_pe,  // [num_tokens, pe_dim]
    cache_t* __restrict__ kv_cache,  // [num_blocks, block_size, (kv_lora_rank
                                     // + pe_dim)]
    const int64_t* __restrict__ slot_mapping,  // [num_tokens]
    const int block_stride,                    //
    const int entry_stride,                    //
    const int kv_c_stride,                     //
    const int k_pe_stride,                     //
    const int kv_lora_rank,                    //
    const int pe_dim,                          //
    const int block_size,                      //
    const float* scale                         //
) {
  const int64_t token_idx = blockIdx.x;
  const int64_t slot_idx = slot_mapping[token_idx];
  // NOTE: slot_idx can be -1 if the token is padded
  if (slot_idx < 0) {
    return;
  }
  const int64_t block_idx = slot_idx / block_size;
  const int64_t block_offset = slot_idx % block_size;

  auto copy = [&](const scalar_t* __restrict__ src, cache_t* __restrict__ dst,
                  int src_stride, int dst_stride, int size, int offset) {
    for (int i = threadIdx.x; i < size; i += blockDim.x) {
      const int64_t src_idx = token_idx * src_stride + i;
      const int64_t dst_idx =
          block_idx * block_stride + block_offset * entry_stride + i + offset;
      if constexpr (kv_dt == Fp8KVCacheDataType::kAuto) {
        dst[dst_idx] = src[src_idx];
      } else {
        dst[dst_idx] =
            fp8::scaled_convert<cache_t, scalar_t, kv_dt>(src[src_idx], *scale);
      }
    }
  };

  copy(kv_c, kv_cache, kv_c_stride, block_stride, kv_lora_rank, 0);
  copy(k_pe, kv_cache, k_pe_stride, block_stride, pe_dim, kv_lora_rank);
}

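// fp8_ds_mla cache entry layout for one token (656 bytes, byte-addressed
// cache; kv_lora_rank = 512 and pe_dim = 64 are enforced by the host
// launcher):
//   bytes [  0, 512): 512 fp8 NoPE values (4 tiles of 128 elements)
//   bytes [512, 528): 4 float32 per-tile scales
//   bytes [528, 656): 64 16-bit RoPE values, stored unquantized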
template <typename scalar_t, typename cache_t, Fp8KVCacheDataType kv_dt>
__global__ void concat_and_cache_ds_mla_kernel(
    const scalar_t* __restrict__ kv_c,  // [num_tokens, kv_lora_rank]
    const scalar_t* __restrict__ k_pe,  // [num_tokens, pe_dim]
    cache_t* __restrict__ kv_cache,  // [num_blocks, block_size, (kv_lora_rank
                                     // + pe_dim)]
    const int64_t* __restrict__ slot_mapping,  // [num_tokens]
    const int block_stride,                    //
    const int entry_stride,                    //
    const int kv_c_stride,                     //
    const int k_pe_stride,                     //
    const int kv_lora_rank,                    //
    const int pe_dim,                          //
    const int block_size,                      //
    const float* scale                         //
) {
  const int64_t token_idx = blockIdx.x;
  const int64_t slot_idx = slot_mapping[token_idx];
  // NOTE: slot_idx can be -1 if the token is padded
  if (slot_idx < 0) {
    return;
  }
  const int64_t block_idx = slot_idx / block_size;
  const int64_t block_offset = slot_idx % block_size;
  const int64_t dst_idx_start =
      block_idx * block_stride + block_offset * entry_stride;

  // For the NoPE part, each tile of 128 elements is handled by half of one
  // warp (16 threads). There are 4 total tiles, so 2 warps (64 threads).
  // Lanes 0 and 16 of each warp write the scale values for that warp's tiles.
  // The RoPE part (last 64 elements) is handled by another 1 warp (32
  // threads). So in total, we use 3 warps (96 threads) per block.

  // Cast kv_cache to 16-bit for the RoPE values
  scalar_t* kv_cache_16bit =
      reinterpret_cast<scalar_t*>(&kv_cache[dst_idx_start]);

  // The last warp handles the RoPE part
  if (threadIdx.x >= 64) {
    // Each thread handles two elements of RoPE
    const int8_t pe_idx_start = (threadIdx.x - 64) * 2;
    const int64_t src_idx = token_idx * k_pe_stride + pe_idx_start;
    // Vectorized load of two 16-bit values, performed as one 32-bit load
    const int32_t vals = *reinterpret_cast<const int32_t*>(&k_pe[src_idx]);
    // RoPE values start after the packed 8-bit NoPE values and the
    // 32-bit scales
    const int64_t dst_idx = kv_lora_rank / 2 + 8 + pe_idx_start;
    // Vectorized store of two 16-bit values, performed as one 32-bit store
    *reinterpret_cast<int32_t*>(&kv_cache_16bit[dst_idx]) = vals;
    return;
  }

  // The first two warps handle the NoPE part
  const int8_t warp_idx = threadIdx.x >> 5;
  const int8_t lane_idx = threadIdx.x & 31;
  const int8_t tile_idx = warp_idx * 2 + (lane_idx >> 4);

  // Each thread handles 8 elements of NoPE
  // Load the NoPE elements for this thread into registers
  const int64_t src_idx_start = token_idx * kv_c_stride + (threadIdx.x * 8);
  // Vectorized load of eight 16-bit values, performed as an int4 load
  const int4 vals_i4 = *reinterpret_cast<const int4*>(&kv_c[src_idx_start]);
  const scalar_t* vals = reinterpret_cast<const scalar_t*>(&vals_i4);

  // Max absolute value of this thread's elements
  float max_abs = fmaxf(fmaxf(fmaxf(fabsf(vals[0]), fabsf(vals[1])),
                              fmaxf(fabsf(vals[2]), fabsf(vals[3]))),
                        fmaxf(fmaxf(fabsf(vals[4]), fabsf(vals[5])),
                              fmaxf(fabsf(vals[6]), fabsf(vals[7]))));

  // Warp-level reduction to find the max absolute value in each half-warp
#pragma unroll
  for (int offset = 8; offset > 0; offset /= 2) {
    max_abs = fmaxf(max_abs, VLLM_SHFL_XOR_SYNC_WIDTH(max_abs, offset, 16));
  }

  // Compute the scale for the tile
  float tile_scale = max_abs / 448.f;
  tile_scale = fmaxf(tile_scale, FLT_MIN);

  // The first lane of each half-warp writes the scale to kv_cache
  if ((lane_idx == 0) || (lane_idx == 16)) {
    float* kv_cache_32bit = reinterpret_cast<float*>(&kv_cache[dst_idx_start]);
    const uint64_t dst_idx = kv_lora_rank / 4 + tile_idx;
    kv_cache_32bit[dst_idx] = tile_scale;
  }

  // Now all threads in the block scale and write their elements
  // NoPE data is packed into the first kv_lora_rank bytes (first 512 bytes)
  const int64_t dst_idx_base = dst_idx_start + (threadIdx.x * 8);

  uint8_t result[8];
#pragma unroll
  for (int i = 0; i < 8; i++) {
    result[i] =
        fp8::scaled_convert<uint8_t, scalar_t, Fp8KVCacheDataType::kFp8E4M3>(
            vals[i], tile_scale);
  }

  // Store as aligned 64-bit writes
  *reinterpret_cast<uint64_t*>(&kv_cache[dst_idx_base]) =
      *reinterpret_cast<const uint64_t*>(result);
}

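// Quantizes the indexer K vectors to fp8, one quantization group at a time:
// the group's amax is reduced with warp shuffles, the scale is
// max(amax, 1e-4) / 448 (rounded up to a power of two when use_ue8m0 is set),
// fp8 values go into the block's data region, and the float32 scale goes into
// the scale region that follows the data of each cache block.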
template <typename scalar_t, typename cache_t, Fp8KVCacheDataType kv_dt>
__global__ void indexer_k_quant_and_cache_kernel(
    const scalar_t* __restrict__ k,  // [num_tokens, head_dim]
    cache_t* __restrict__ kv_cache,  // [num_blocks, block_size, cache_stride]
    const int64_t* __restrict__ slot_mapping,  // [num_tokens]
    const int head_dim,          // dimension of each head
    const int quant_block_size,  // quantization block size
    const int cache_block_size,  // cache block size
    const int cache_stride,      // stride for each token in kv_cache
    const bool use_ue8m0         // use ue8m0 scale format
) {
  constexpr int VEC_SIZE = 4;
  const int64_t token_idx = blockIdx.x;
  const int64_t head_dim_idx = (blockIdx.y * blockDim.y * blockDim.x +
                                threadIdx.y * blockDim.x + threadIdx.x) *
                               VEC_SIZE;
  const int64_t slot_idx = slot_mapping[token_idx];
  const int64_t block_idx = slot_idx / cache_block_size;
  const int64_t block_offset = slot_idx % cache_block_size;

  // NOTE: slot_idx can be -1 if the token is padded
  if (slot_idx < 0 || (head_dim_idx >= head_dim)) {
    return;
  }

  float2 k_val = (reinterpret_cast<const float2*>(
      k))[(token_idx * head_dim + head_dim_idx) / VEC_SIZE];
  scalar_t* k_val_ptr = reinterpret_cast<scalar_t*>(&k_val);
  float amax = 0.0f;
  for (int i = 0; i < VEC_SIZE; i++) {
    amax = fmaxf(amax, fabsf(float(k_val_ptr[i])));
  }
#ifndef USE_ROCM
  __syncwarp();
#endif

  // Warp-level reduction of amax
  for (int mask = 16; mask > 0; mask /= 2) {
#ifdef USE_ROCM
    amax = fmaxf(amax, __shfl_xor_sync(uint64_t(-1), amax, mask));
#else
    amax = fmaxf(amax, __shfl_xor_sync(unsigned(-1), amax, mask));
#endif
  }
#ifndef USE_ROCM
  __syncwarp();
#endif
  float scale = fmaxf(amax, 1e-4) / 448.0f;
  if (use_ue8m0) {
    scale = exp2f(ceilf(log2f(scale)));
  }

  const int64_t dst_offset = block_idx * cache_block_size * cache_stride +
                             block_offset * head_dim + head_dim_idx;
  for (int i = 0; i < VEC_SIZE; i++) {
    kv_cache[dst_offset + i] =
        fp8::scaled_convert<cache_t, scalar_t, kv_dt>(k_val_ptr[i], scale);
  }
  if (threadIdx.x == 0) {
    const int64_t dst_scale_idx =
        block_idx * cache_block_size * cache_stride +
        cache_block_size * head_dim +
        (block_offset * head_dim + head_dim_idx) * 4 / quant_block_size;
    reinterpret_cast<float*>(kv_cache)[dst_scale_idx / 4] = scale;
  }
}

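// Gathers the quantized indexer K values (and their per-group scales) for a
// batch of sequences from the paged cache back into contiguous per-token
// buffers, using block_table and cu_seq_lens to locate each token's block.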
template <int BLOCK_Y_SIZE>
__global__ void cp_gather_indexer_k_quant_cache_kernel(
    const char* __restrict__ kv_cache,  // [num_blocks, block_size,
                                        // cache_stride]
    char* __restrict__ dst_k,      // [num_tokens, head_dim]
    char* __restrict__ dst_scale,  // [num_tokens, head_dim / quant_block_size
                                   // * 4]
    const int* __restrict__ block_table,  // [batch_size, num_blocks]
    const int* __restrict__ cu_seq_lens,  // [batch_size + 1]
    const int batch_size,                 // batch size
    const int64_t token_stride,        // stride for each token in dst_k
    const int64_t head_dim,            // dimension of each head
    const int64_t block_stride,        // stride for each block in kv_cache
    const int64_t cache_token_stride,  // stride for each token in kv_cache
    const int64_t cache_block_size,    // num_tokens for each block in kv_cache
    const int num_blocks,              // number of blocks
    const int num_tokens,              // number of tokens
    const int quant_block_size         // quantization block size
) {
  constexpr int VEC_SIZE = sizeof(float4) / sizeof(char);
  const int token_idx = blockIdx.x * blockDim.y + threadIdx.y;
  const int head_idx = (blockIdx.y * blockDim.x + threadIdx.x) * VEC_SIZE;
  // Find batch index within a block
  __shared__ int batch_idx[BLOCK_Y_SIZE];
  for (int iter = 0; iter < cuda_utils::ceil_div(batch_size, int(blockDim.x));
       iter++) {
    int tid = iter * blockDim.x + threadIdx.x;
    if (tid < batch_size) {
      const int seq_start = cu_seq_lens[tid];
      const int seq_end = cu_seq_lens[tid + 1];
      if (token_idx >= seq_start && token_idx < seq_end) {
        batch_idx[threadIdx.y] = tid;
      }
    }
  }

#ifndef USE_ROCM
  __syncwarp();
#endif

  if (head_idx >= head_dim || token_idx >= num_tokens) {
    return;
  }
  const int inbatch_seq_idx = token_idx - cu_seq_lens[batch_idx[threadIdx.y]];
  const int block_idx = block_table[batch_idx[threadIdx.y] * num_blocks +
                                    inbatch_seq_idx / cache_block_size];
  const int64_t src_block_offset = block_idx * block_stride;
  const int64_t cache_inblock_offset =
      (inbatch_seq_idx % cache_block_size) * head_dim + head_idx;
  const int64_t src_inblock_offset = src_block_offset + cache_inblock_offset;
  const int64_t dst_inblock_offset = token_idx * token_stride + head_idx;

  reinterpret_cast<float4*>(dst_k)[dst_inblock_offset / VEC_SIZE] =
      reinterpret_cast<const float4*>(kv_cache)[src_inblock_offset / VEC_SIZE];
  if (threadIdx.x == 0) {
    const int64_t src_scale_offset =
        src_block_offset + cache_block_size * head_dim +
        cache_inblock_offset * 4 / quant_block_size;
    reinterpret_cast<float*>(dst_scale)[dst_inblock_offset / quant_block_size] =
        reinterpret_cast<const float*>(kv_cache)[src_scale_offset / 4];
  }
}

}  // namespace vllm

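// Host-side launchers for the kernels above. Each CALL_* macro instantiates
// the matching kernel for one (KV_T, CACHE_T, KV_DTYPE) combination and is
// expanded by DISPATCH_BY_KV_CACHE_DTYPE according to the runtime
// kv_cache_dtype string.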
// KV_T is the data type of key and value tensors.
// CACHE_T is the stored data type of kv-cache.
// KV_DTYPE is the real data type of kv-cache.
#define CALL_RESHAPE_AND_CACHE(KV_T, CACHE_T, KV_DTYPE)               \
  vllm::reshape_and_cache_kernel<KV_T, CACHE_T, KV_DTYPE>             \
      <<<grid, block, 0, stream>>>(                                   \
          reinterpret_cast<KV_T*>(key.data_ptr()),                    \
          reinterpret_cast<KV_T*>(value.data_ptr()),                  \
          reinterpret_cast<CACHE_T*>(key_cache.data_ptr()),           \
          reinterpret_cast<CACHE_T*>(value_cache.data_ptr()),         \
          slot_mapping.data_ptr<int64_t>(), key_stride, value_stride, \
          num_heads, head_size, block_size, x,                        \
          reinterpret_cast<const float*>(k_scale.data_ptr()),         \
          reinterpret_cast<const float*>(v_scale.data_ptr()));

void reshape_and_cache(
    torch::Tensor& key,    // [num_tokens, num_heads, head_size]
    torch::Tensor& value,  // [num_tokens, num_heads, head_size]
    torch::Tensor&
        key_cache,  // [num_blocks, num_heads, head_size/x, block_size, x]
    torch::Tensor&
        value_cache,  // [num_blocks, num_heads, head_size, block_size]
    torch::Tensor& slot_mapping,  // [num_tokens]
    const std::string& kv_cache_dtype, torch::Tensor& k_scale,
    torch::Tensor& v_scale) {
  int num_tokens = slot_mapping.size(0);
  int num_heads = key.size(1);
  int head_size = key.size(2);
  int block_size = key_cache.size(3);
  int x = key_cache.size(4);

  int key_stride = key.stride(0);
  int value_stride = value.stride(0);
  int head_div_x = head_size / x;

  dim3 grid(num_tokens);
  dim3 block(std::min(num_heads * head_div_x, 512));
  const at::cuda::OptionalCUDAGuard device_guard(device_of(key));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  DISPATCH_BY_KV_CACHE_DTYPE(key.dtype(), kv_cache_dtype,
                             CALL_RESHAPE_AND_CACHE);
}

// KV_T is the data type of key and value tensors.
// CACHE_T is the stored data type of kv-cache.
// KV_DTYPE is the real data type of kv-cache.
#define CALL_RESHAPE_AND_CACHE_FLASH(KV_T, CACHE_T, KV_DTYPE)             \
  vllm::reshape_and_cache_flash_kernel<KV_T, CACHE_T, KV_DTYPE>           \
      <<<grid, block, 0, stream>>>(                                       \
          reinterpret_cast<KV_T*>(key.data_ptr()),                        \
          reinterpret_cast<KV_T*>(value.data_ptr()),                      \
          reinterpret_cast<CACHE_T*>(key_cache.data_ptr()),               \
          reinterpret_cast<CACHE_T*>(value_cache.data_ptr()),             \
          slot_mapping.data_ptr<int64_t>(), block_stride, page_stride,    \
          head_stride, key_stride, value_stride, num_heads, head_size,    \
          block_size, reinterpret_cast<const float*>(k_scale.data_ptr()), \
          reinterpret_cast<const float*>(v_scale.data_ptr()));

void reshape_and_cache_flash(
    torch::Tensor& key,        // [num_tokens, num_heads, head_size]
    torch::Tensor& value,      // [num_tokens, num_heads, head_size]
    torch::Tensor& key_cache,  // [num_blocks, block_size, num_heads, head_size]
    torch::Tensor&
        value_cache,  // [num_blocks, block_size, num_heads, head_size]
    torch::Tensor& slot_mapping,  // [num_tokens] or [num_actual_tokens]
    const std::string& kv_cache_dtype, torch::Tensor& k_scale,
    torch::Tensor& v_scale) {
  // NOTE(woosuk): In vLLM V0, key.size(0) is always equal to
  // slot_mapping.size(0) because both include padding.
  // In vLLM V1, key.size(0) can be larger than slot_mapping.size(0) since key
  // includes padding for CUDA graphs, while slot_mapping does not. In that
  // case, slot_mapping.size(0) is the actual number of tokens before padding.
  // For compatibility with both cases, we use slot_mapping.size(0) as the
  // number of tokens.
  int num_tokens = slot_mapping.size(0);
  int num_heads = key.size(1);
  int head_size = key.size(2);
  int block_size = key_cache.size(1);

  int64_t key_stride = key.stride(0);
  int64_t value_stride = value.stride(0);
  int64_t block_stride = key_cache.stride(0);
  int64_t page_stride = key_cache.stride(1);
  int64_t head_stride = key_cache.stride(2);
  TORCH_CHECK(key_cache.stride(0) == value_cache.stride(0));

  dim3 grid(num_tokens);
  dim3 block(std::min(num_heads * head_size, 512));
  const at::cuda::OptionalCUDAGuard device_guard(device_of(key));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  DISPATCH_BY_KV_CACHE_DTYPE(key.dtype(), kv_cache_dtype,
                             CALL_RESHAPE_AND_CACHE_FLASH);
}

// KV_T is the data type of key and value tensors.
// CACHE_T is the stored data type of kv-cache.
// KV_DTYPE is the real data type of kv-cache.
#define CALL_CONCAT_AND_CACHE_MLA(KV_T, CACHE_T, KV_DTYPE)              \
  vllm::concat_and_cache_mla_kernel<KV_T, CACHE_T, KV_DTYPE>            \
      <<<grid, block, 0, stream>>>(                                     \
          reinterpret_cast<KV_T*>(kv_c.data_ptr()),                     \
          reinterpret_cast<KV_T*>(k_pe.data_ptr()),                     \
          reinterpret_cast<CACHE_T*>(kv_cache.data_ptr()),              \
          slot_mapping.data_ptr<int64_t>(), block_stride, entry_stride, \
          kv_c_stride, k_pe_stride, kv_lora_rank, pe_dim, block_size,   \
          reinterpret_cast<const float*>(scale.data_ptr()));

// KV_T is the data type of key and value tensors.
// CACHE_T is the stored data type of kv-cache.
#define CALL_CONCAT_AND_CACHE_DS_MLA(KV_T, CACHE_T, KV_DTYPE)           \
  vllm::concat_and_cache_ds_mla_kernel<KV_T, CACHE_T, KV_DTYPE>         \
      <<<grid, block, 0, stream>>>(                                     \
          reinterpret_cast<KV_T*>(kv_c.data_ptr()),                     \
          reinterpret_cast<KV_T*>(k_pe.data_ptr()),                     \
          reinterpret_cast<CACHE_T*>(kv_cache.data_ptr()),              \
          slot_mapping.data_ptr<int64_t>(), block_stride, entry_stride, \
          kv_c_stride, k_pe_stride, kv_lora_rank, pe_dim, block_size,   \
          reinterpret_cast<const float*>(scale.data_ptr()));

void concat_and_cache_mla(
    torch::Tensor& kv_c,          // [num_tokens, kv_lora_rank]
    torch::Tensor& k_pe,          // [num_tokens, pe_dim]
    torch::Tensor& kv_cache,      // [num_blocks, block_size, (kv_lora_rank +
                                  //  pe_dim)]
    torch::Tensor& slot_mapping,  // [num_tokens] or [num_actual_tokens]
    const std::string& kv_cache_dtype, torch::Tensor& scale) {
  // NOTE(woosuk): In vLLM V0, kv_c.size(0) is always equal to
  // slot_mapping.size(0) because both include padding.
  // In vLLM V1, kv_c.size(0) can be larger than slot_mapping.size(0) since
  // kv_c includes padding for CUDA graphs, while slot_mapping does not. In
  // that case, slot_mapping.size(0) is the actual number of tokens before
  // padding.
  // For compatibility with both cases, we use slot_mapping.size(0) as the
  // number of tokens.
  int num_tokens = slot_mapping.size(0);
  int kv_lora_rank = kv_c.size(1);
  int pe_dim = k_pe.size(1);
  int block_size = kv_cache.size(1);

  if (kv_cache_dtype == "fp8_ds_mla") {
    TORCH_CHECK(kv_lora_rank == 512, "kv_lora_rank must be 512 for fp8_ds_mla");
    TORCH_CHECK(pe_dim == 64, "pe_dim must be 64 for fp8_ds_mla");
    TORCH_CHECK(kv_cache.size(2) == 656 / kv_cache.itemsize(),
                "each kv_cache entry must be 656 bytes for fp8_ds_mla");
    TORCH_CHECK(kv_c.itemsize() == 2,
                "kv_c.itemsize() must be 2 for fp8_ds_mla");
    TORCH_CHECK(k_pe.itemsize() == 2,
                "k_pe.itemsize() must be 2 for fp8_ds_mla");
  } else {
    TORCH_CHECK(kv_cache.size(2) == kv_lora_rank + pe_dim);
  }

  int kv_c_stride = kv_c.stride(0);
  int k_pe_stride = k_pe.stride(0);
  int block_stride = kv_cache.stride(0);
  int entry_stride = kv_cache.stride(1);

  const at::cuda::OptionalCUDAGuard device_guard(device_of(kv_c));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  if (kv_cache_dtype == "fp8_ds_mla") {
    dim3 grid(num_tokens);
    // For the NoPE part, each tile of 128 elements is handled by half of one
    // warp (16 threads). There are 4 total tiles, so 2 warps (64 threads).
    // Lanes 0 and 16 of each warp write the scale values for that warp's
    // tiles. The RoPE part (last 64 elements) is handled by another 1 warp
    // (32 threads). So in total, we use 3 warps (96 threads) per block.
    dim3 block(96);
    DISPATCH_BY_KV_CACHE_DTYPE(kv_c.dtype(), kv_cache_dtype,
                               CALL_CONCAT_AND_CACHE_DS_MLA);
  } else {
    dim3 grid(num_tokens);
    dim3 block(std::min(kv_lora_rank, 512));
    DISPATCH_BY_KV_CACHE_DTYPE(kv_c.dtype(), kv_cache_dtype,
                               CALL_CONCAT_AND_CACHE_MLA);
  }
}

namespace vllm {

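// Element-wise conversion between a full cache tensor and its fp8
// representation, used only for testing. One thread block per cache block;
// each block converts block_stride elements.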
template <typename Tout, typename Tin, Fp8KVCacheDataType kv_dt>
__global__ void convert_fp8_kernel(const Tin* __restrict__ src_cache,
                                   Tout* __restrict__ dst_cache,
                                   const float scale,
                                   const int64_t block_stride) {
  const int64_t block_idx = blockIdx.x;
  for (int i = threadIdx.x; i < block_stride; i += blockDim.x) {
    int64_t idx = block_idx * block_stride + i;
    dst_cache[idx] =
        fp8::scaled_convert<Tout, Tin, kv_dt>(src_cache[idx], scale);
  }
}

}  // namespace vllm

#define CALL_CONVERT_FP8(Tout, Tin, KV_DTYPE)                                \
  vllm::convert_fp8_kernel<Tout, Tin, KV_DTYPE><<<grid, block, 0, stream>>>( \
      reinterpret_cast<Tin*>(src_cache.data_ptr()),                          \
      reinterpret_cast<Tout*>(dst_cache.data_ptr()), scale, block_stride);

// Only for testing.
void convert_fp8(torch::Tensor& dst_cache, torch::Tensor& src_cache,
                 const double scale, const std::string& kv_cache_dtype) {
  torch::Device src_device = src_cache.device();
  torch::Device dst_device = dst_cache.device();
  TORCH_CHECK(src_device.is_cuda(), "src must be on a GPU");
  TORCH_CHECK(dst_device.is_cuda(), "dst must be on a GPU");
  TORCH_CHECK(src_device.index() == dst_device.index(),
              "src and dst must be on the same GPU");
  at::cuda::OptionalCUDAGuard device_guard(src_device);

  int64_t num_blocks = src_cache.size(0);
  int64_t block_stride = src_cache.stride(0);

  dim3 grid(num_blocks);
  dim3 block(std::min(block_stride, int64_t(512)));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  if (kv_cache_dtype == "auto") {
    if (src_cache.dtype() == at::ScalarType::Float) {
      CALL_CONVERT_FP8(uint8_t, float, vllm::Fp8KVCacheDataType::kAuto);
    } else if (src_cache.dtype() == at::ScalarType::Half) {
      CALL_CONVERT_FP8(uint8_t, uint16_t, vllm::Fp8KVCacheDataType::kAuto);
    } else if (src_cache.dtype() == at::ScalarType::BFloat16) {
      CALL_CONVERT_FP8(uint8_t, __nv_bfloat16, vllm::Fp8KVCacheDataType::kAuto);
    } else if (dst_cache.dtype() == at::ScalarType::Float) {
      CALL_CONVERT_FP8(float, uint8_t, vllm::Fp8KVCacheDataType::kAuto);
    } else if (dst_cache.dtype() == at::ScalarType::Half) {
      CALL_CONVERT_FP8(uint16_t, uint8_t, vllm::Fp8KVCacheDataType::kAuto);
    } else if (dst_cache.dtype() == at::ScalarType::BFloat16) {
      CALL_CONVERT_FP8(__nv_bfloat16, uint8_t, vllm::Fp8KVCacheDataType::kAuto);
    }
  } else if (kv_cache_dtype == "fp8" || kv_cache_dtype == "fp8_e4m3") {
    if (src_cache.dtype() == at::ScalarType::Float) {
      CALL_CONVERT_FP8(uint8_t, float, vllm::Fp8KVCacheDataType::kFp8E4M3);
    } else if (src_cache.dtype() == at::ScalarType::Half) {
      CALL_CONVERT_FP8(uint8_t, uint16_t, vllm::Fp8KVCacheDataType::kFp8E4M3);
    } else if (src_cache.dtype() == at::ScalarType::BFloat16) {
      CALL_CONVERT_FP8(uint8_t, __nv_bfloat16,
                       vllm::Fp8KVCacheDataType::kFp8E4M3);
    } else if (dst_cache.dtype() == at::ScalarType::Float) {
      CALL_CONVERT_FP8(float, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3);
    } else if (dst_cache.dtype() == at::ScalarType::Half) {
      CALL_CONVERT_FP8(uint16_t, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3);
    } else if (dst_cache.dtype() == at::ScalarType::BFloat16) {
      CALL_CONVERT_FP8(__nv_bfloat16, uint8_t,
                       vllm::Fp8KVCacheDataType::kFp8E4M3);
    }
  } else {
    TORCH_CHECK(false, "Unsupported data type: ", kv_cache_dtype);
  }
}

namespace vllm {

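// Gathers (and, for fp8 caches, dequantizes) the cached entries of every
// sequence in the batch into a contiguous [TOT_TOKENS, ENTRIES...] tensor.
// Each sequence's blocks are divided across gridDim.y splits; every split
// copies a contiguous range of blocks, with the final split handling any
// partially filled last block.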
// grid is launched with dimensions (batch, num_splits)
|
|
template <typename scalar_t, typename cache_t, Fp8KVCacheDataType kv_dt>
|
|
__global__ void gather_and_maybe_dequant_cache(
|
|
const cache_t* __restrict__ src_cache, // [NUM_BLOCKS, BLOCK_SIZE,
|
|
// ENTRIES...]
|
|
scalar_t* __restrict__ dst, // [TOT_TOKENS, ENTRIES...]
|
|
const int32_t* __restrict__ block_table, // [BATCH, BLOCK_INDICES]
|
|
const int32_t* __restrict__ cu_seq_lens, // [BATCH+1]
|
|
const int32_t block_size, const int32_t entry_size,
|
|
const int64_t block_table_stride, const int64_t cache_block_stride,
|
|
const int64_t cache_entry_stride, const int64_t dst_entry_stride,
|
|
const float* __restrict__ scale,
|
|
const int32_t* __restrict__ seq_starts) { // Optional: starting offsets per
|
|
// batch
|
|
|
|
const int64_t bid = blockIdx.x; // Batch ID
|
|
const int32_t num_splits = gridDim.y;
|
|
const int32_t split = blockIdx.y;
|
|
const int32_t seq_start = cu_seq_lens[bid];
|
|
const int32_t seq_end = cu_seq_lens[bid + 1];
|
|
const int32_t seq_len = seq_end - seq_start;
|
|
const int32_t tot_blocks = cuda_utils::ceil_div(seq_len, block_size);
|
|
const int32_t split_blocks = cuda_utils::ceil_div(tot_blocks, num_splits);
|
|
|
|
const int32_t split_start = split * split_blocks;
|
|
const int32_t split_end = min((split + 1) * split_blocks, tot_blocks);
|
|
|
|
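
  // Worked example (illustrative numbers): seq_len = 300 and block_size = 16
  // give tot_blocks = 19; with num_splits = 4, split_blocks = 5, so the splits
  // cover blocks [0, 5), [5, 10), [10, 15) and [15, 19). Only the last split
  // can see a partial tail block (here 300 - 18 * 16 = 12 tokens), which is
  // copied separately below.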
  const bool is_active_split = (split_start < tot_blocks);
  const bool is_last_split = (split_end == tot_blocks);

  if (!is_active_split) return;

  int32_t full_blocks_end = split_end;
  int32_t partial_block_size = 0;

  // Adjust the pointer for the block_table for this batch.
  // If seq_starts is provided, compute an offset based on
  // (seq_starts[bid] / page_size).
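  // (For example, seq_starts[bid] = 48 with block_size = 16 skips the first
  // three block-table entries for this sequence; numbers are illustrative.)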
  const int32_t batch_offset = bid * block_table_stride;
  int32_t offset = 0;
  if (seq_starts != nullptr) {
    offset = seq_starts[bid] / block_size;
  }
  const int32_t* batch_block_table = block_table + batch_offset + offset;

  // Adjust dst pointer based on the cumulative sequence lengths.
  dst += seq_start * dst_entry_stride;

  if (is_last_split) {
    partial_block_size = seq_len % block_size;
    if (partial_block_size) full_blocks_end -= 1;
  }

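  // All threads of the block cooperate on a single cache entry: thread i
  // copies (and, for fp8 caches, dequantizes) elements i, i + blockDim.x, ...
  // of the entry.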
  auto copy_entry = [&](const cache_t* __restrict__ _src,
                        scalar_t* __restrict__ _dst) {
    for (int i = threadIdx.x; i < entry_size; i += blockDim.x) {
      if constexpr (kv_dt == Fp8KVCacheDataType::kAuto) {
        _dst[i] = static_cast<scalar_t>(_src[i]);
      } else {
        _dst[i] =
            fp8::scaled_convert<scalar_t, cache_t, kv_dt>(_src[i], *scale);
      }
    }
  };

  for (int pid = split_start; pid < full_blocks_end; ++pid) {
    auto block_id = batch_block_table[pid];
    auto block_start_ptr = src_cache + block_id * cache_block_stride;
    auto block_dst_ptr = dst + pid * block_size * dst_entry_stride;
    for (int eid = 0; eid < block_size; ++eid) {
      copy_entry(block_start_ptr + eid * cache_entry_stride,
                 block_dst_ptr + eid * dst_entry_stride);
    }
  }

  if (partial_block_size) {
    auto block_id = batch_block_table[full_blocks_end];
    auto block_start_ptr = src_cache + block_id * cache_block_stride;
    auto block_dst_ptr = dst + full_blocks_end * block_size * dst_entry_stride;
    for (int eid = 0; eid < partial_block_size; ++eid) {
      copy_entry(block_start_ptr + eid * cache_entry_stride,
                 block_dst_ptr + eid * dst_entry_stride);
    }
  }
}

}  // namespace vllm

// Macro to dispatch the kernel based on the data type.
// SCALAR_T is the data type of the destination tensor.
// CACHE_T is the stored data type of kv-cache.
// KV_DTYPE is the real data type of kv-cache.
#define CALL_GATHER_CACHE(SCALAR_T, CACHE_T, KV_DTYPE)                        \
  vllm::gather_and_maybe_dequant_cache<SCALAR_T, CACHE_T, KV_DTYPE>          \
      <<<grid, block, 0, stream>>>(                                           \
          reinterpret_cast<CACHE_T*>(src_cache.data_ptr()),                   \
          reinterpret_cast<SCALAR_T*>(dst.data_ptr()),                        \
          block_table.data_ptr<int32_t>(), cu_seq_lens.data_ptr<int32_t>(),   \
          block_size, entry_size, block_table_stride, cache_block_stride,     \
          cache_entry_stride, dst_entry_stride,                               \
          reinterpret_cast<const float*>(scale.data_ptr()), seq_starts_ptr);

// Gather sequences from the cache into the destination tensor.
// - cu_seq_lens contains the cumulative sequence lengths for each batch
// - block_table contains the cache block indices for each sequence
// - Optionally, seq_starts (if provided) offsets the starting block index by
//   (seq_starts[bid] / page_size)
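//
// A minimal host-side call might look like the following (shapes, the dtype
// string and the tensors are illustrative assumptions, not taken from a
// particular caller):
//
//   // dst: [tot_tokens, entry_size] in the model dtype; src_cache may be fp8.
//   gather_and_maybe_dequant_cache(src_cache, dst, block_table, cu_seq_lens,
//                                  batch_size, /*kv_cache_dtype=*/"fp8",
//                                  scale, /*seq_starts=*/std::nullopt);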
void gather_and_maybe_dequant_cache(
    torch::Tensor const& src_cache,    // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
    torch::Tensor const& dst,          // [TOT_TOKENS, ENTRIES...]
    torch::Tensor const& block_table,  // [BATCH, BLOCK_INDICES]
    torch::Tensor const& cu_seq_lens,  // [BATCH+1]
    int64_t batch_size, const std::string& kv_cache_dtype,
    torch::Tensor const& scale,
    std::optional<torch::Tensor> seq_starts = std::nullopt) {
  at::cuda::OptionalCUDAGuard device_guard(src_cache.device());
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  int32_t block_size = src_cache.size(1);
  int32_t entry_size = src_cache.flatten(2, -1).size(2);

  TORCH_CHECK(block_table.dtype() == torch::kInt32,
              "block_table must be int32");
  TORCH_CHECK(cu_seq_lens.dtype() == torch::kInt32,
              "cu_seq_lens must be int32");
  if (seq_starts.has_value()) {
    TORCH_CHECK(seq_starts.value().dtype() == torch::kInt32,
                "seq_starts must be int32");
  }

  TORCH_CHECK(src_cache.device() == dst.device(),
              "src_cache and dst must be on the same device");
  TORCH_CHECK(src_cache.device() == block_table.device(),
              "src_cache and block_table must be on the same device");
  TORCH_CHECK(src_cache.device() == cu_seq_lens.device(),
              "src_cache and cu_seq_lens must be on the same device");
  if (seq_starts.has_value()) {
    TORCH_CHECK(src_cache.device() == seq_starts.value().device(),
                "src_cache and seq_starts must be on the same device");
  }

  int64_t block_table_stride = block_table.stride(0);
  int64_t cache_block_stride = src_cache.stride(0);
  int64_t cache_entry_stride = src_cache.stride(1);
  int64_t dst_entry_stride = dst.stride(0);

  // Decide on the number of splits based on the batch size.
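  // Smaller batches get more splits so the (batch, num_splits) grid still
  // exposes enough thread blocks to fill the GPU; larger batches already
  // provide that parallelism along the batch dimension.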
  int num_splits = batch_size > 128 ? 2 : batch_size > 64 ? 4 : 16;
  dim3 grid(batch_size, num_splits);
  dim3 block(1024);

  const int32_t* seq_starts_ptr =
      seq_starts.has_value() ? seq_starts.value().data_ptr<int32_t>() : nullptr;

  DISPATCH_BY_KV_CACHE_DTYPE(dst.dtype(), kv_cache_dtype, CALL_GATHER_CACHE);
}

namespace vllm {
// Note(hc): cp_gather_cache no longer requires seq_starts to be divisible by
// block_size.
template <typename scalar_t>
__global__ void cp_gather_cache(
    const scalar_t* __restrict__ src_cache,   // [NUM_BLOCKS, BLOCK_SIZE,
                                              //  ENTRY_SIZE]
    scalar_t* __restrict__ dst,               // [TOT_TOKENS, ENTRY_SIZE]
    const int32_t* __restrict__ block_table,  // [BATCH, BLOCK_INDICES]
    const int32_t* __restrict__ cu_seq_lens,  // [BATCH+1]
    const int32_t block_size, const int32_t entry_size,
    const int64_t block_table_stride, const int64_t cache_block_stride,
    const int64_t cache_entry_stride, const int64_t dst_entry_stride,
    const int32_t* __restrict__ seq_starts  // Optional: starting offsets per
                                            // batch
) {
  const int64_t bid = blockIdx.x;  // Batch ID
  const int32_t num_splits = gridDim.y;
  const int32_t split = blockIdx.y;
  const int32_t seq_start = cu_seq_lens[bid];
  const int32_t seq_end = cu_seq_lens[bid + 1];
  const int32_t seq_len = seq_end - seq_start;
  const int32_t tot_slots = seq_len;
  const int32_t split_slots = cuda_utils::ceil_div(tot_slots, num_splits);

  const int32_t split_start = split * split_slots;
  const int32_t split_end = min((split + 1) * split_slots, tot_slots);

  const bool is_active_split = (split_start < tot_slots);

  if (!is_active_split) return;

  // Adjust the pointer for the block_table for this batch.
  // If seq_starts is provided, compute an offset based on it.
  const int32_t batch_offset = bid * block_table_stride;
  int32_t offset = split_start;
  if (seq_starts != nullptr) {
    offset += seq_starts[bid];
  }
  int32_t offset_div = offset / block_size;
  offset = offset % block_size;
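  // offset_div indexes into the block table (which block holds the first slot
  // of this split) and offset is the slot position within that block, so a
  // non-block-aligned seq_starts simply lands mid-block.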
  const int32_t* batch_block_table = block_table + batch_offset;

  // Adjust dst pointer based on the cumulative sequence lengths.
  dst += seq_start * dst_entry_stride;

  auto copy_entry = [&](const scalar_t* __restrict__ _src,
                        scalar_t* __restrict__ _dst) {
    for (int i = threadIdx.x; i < entry_size; i += blockDim.x)
      _dst[i] = _src[i];
  };

  for (int pid = split_start; pid < split_end; ++pid) {
    auto block_id = batch_block_table[offset_div];
    auto block_start_ptr = src_cache + block_id * cache_block_stride;
    auto block_dst_ptr = dst + pid * dst_entry_stride;
    copy_entry(block_start_ptr + offset * cache_entry_stride, block_dst_ptr);
    offset += 1;
    // bump to next block
    if (offset == block_size) {
      offset_div += 1;
      offset = 0;
    }
  }
}
}  // namespace vllm

// Macro to dispatch the kernel based on the data type.
#define CALL_CP_GATHER_CACHE(CPY_DTYPE)                                     \
  vllm::cp_gather_cache<CPY_DTYPE><<<grid, block, 0, stream>>>(             \
      reinterpret_cast<CPY_DTYPE*>(src_cache.data_ptr()),                   \
      reinterpret_cast<CPY_DTYPE*>(dst.data_ptr()),                         \
      block_table.data_ptr<int32_t>(), cu_seq_lens.data_ptr<int32_t>(),     \
      block_size, entry_size, block_table_stride, cache_block_stride,       \
      cache_entry_stride, dst_entry_stride, seq_starts_ptr);

// Gather sequences from the cache into the destination tensor.
// - cu_seq_lens contains the cumulative sequence lengths for each batch
// - block_table contains the cache block indices for each sequence
// - Optionally, seq_starts (if provided) offsets the starting slot index by
//   seq_starts[bid]
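// Unlike gather_and_maybe_dequant_cache above, this variant copies raw cache
// entries without any dequantization and gathers at token (slot) granularity,
// which is why seq_starts does not need to be block-aligned.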
void cp_gather_cache(
    torch::Tensor const& src_cache,    // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
    torch::Tensor const& dst,          // [TOT_TOKENS, ENTRIES...]
    torch::Tensor const& block_table,  // [BATCH, BLOCK_INDICES]
    torch::Tensor const& cu_seq_lens,  // [BATCH+1]
    int64_t batch_size,
    std::optional<torch::Tensor> seq_starts = std::nullopt) {
  at::cuda::OptionalCUDAGuard device_guard(src_cache.device());
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  int32_t block_size = src_cache.size(1);
  int32_t entry_size = src_cache.flatten(2, -1).size(2);

  TORCH_CHECK(block_table.dtype() == torch::kInt32,
              "block_table must be int32");
  TORCH_CHECK(cu_seq_lens.dtype() == torch::kInt32,
              "cu_seq_lens must be int32");
  if (seq_starts.has_value()) {
    TORCH_CHECK(seq_starts.value().dtype() == torch::kInt32,
                "seq_starts must be int32");
  }

  TORCH_CHECK(src_cache.device() == dst.device(),
              "src_cache and dst must be on the same device");
  TORCH_CHECK(src_cache.device() == block_table.device(),
              "src_cache and block_table must be on the same device");
  TORCH_CHECK(src_cache.device() == cu_seq_lens.device(),
              "src_cache and cu_seq_lens must be on the same device");
  if (seq_starts.has_value()) {
    TORCH_CHECK(src_cache.device() == seq_starts.value().device(),
                "src_cache and seq_starts must be on the same device");
  }

  int64_t block_table_stride = block_table.stride(0);
  int64_t cache_block_stride = src_cache.stride(0);
  int64_t cache_entry_stride = src_cache.stride(1);
  int64_t dst_entry_stride = dst.stride(0);

  // Decide on the number of splits based on the batch size.
  int num_splits = batch_size > 128 ? 2 : batch_size > 64 ? 4 : 16;
  dim3 grid(batch_size, num_splits);
  dim3 block(1024);

  TORCH_CHECK(src_cache.dtype() == dst.dtype(),
              "src_cache and dst must have the same dtype");

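  // The kernel is a pure copy, so only the element width matters for dispatch:
  // 32-, 16- and 8-bit dtypes are all handled via the matching unsigned
  // integer type below.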
  const int dtype_bits = src_cache.element_size() * 8;
  const int32_t* seq_starts_ptr =
      seq_starts.has_value() ? seq_starts.value().data_ptr<int32_t>() : nullptr;

  if (dtype_bits == 32) {
    CALL_CP_GATHER_CACHE(uint32_t);
  } else if (dtype_bits == 16) {
    CALL_CP_GATHER_CACHE(uint16_t);
  } else if (dtype_bits == 8) {
    CALL_CP_GATHER_CACHE(uint8_t);
  } else {
    TORCH_CHECK(false, "Unsupported data type width: ", dtype_bits);
  }
}

// Macro to dispatch the kernel based on the data type.
#define CALL_INDEXER_K_QUANT_AND_CACHE(KV_T, CACHE_T, KV_DTYPE)             \
  vllm::indexer_k_quant_and_cache_kernel<KV_T, CACHE_T, KV_DTYPE>           \
      <<<grid, block, 0, stream>>>(                                         \
          reinterpret_cast<KV_T*>(k.data_ptr()),                            \
          reinterpret_cast<CACHE_T*>(kv_cache.data_ptr()),                  \
          slot_mapping.data_ptr<int64_t>(), head_dim, quant_block_size,     \
          cache_block_size, cache_stride, use_ue8m0);

void indexer_k_quant_and_cache(
    torch::Tensor& k,             // [num_tokens, head_dim]
    torch::Tensor& kv_cache,      // [num_blocks, block_size, cache_stride]
    torch::Tensor& slot_mapping,  // [num_tokens]
    int64_t quant_block_size,     // quantization block size
    const std::string& scale_fmt) {
  int num_tokens = k.size(0);
  int head_dim = k.size(1);
  int cache_block_size = kv_cache.size(1);
  int cache_stride = kv_cache.size(2);
  bool use_ue8m0 = scale_fmt == "ue8m0";
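  // "ue8m0" requests power-of-two (UE8M0, exponent-only) scale factors; the
  // actual scale handling lives in the kernel launched by the macro above.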

  TORCH_CHECK(k.device() == kv_cache.device(),
              "k and kv_cache must be on the same device");
  TORCH_CHECK(k.device() == slot_mapping.device(),
              "k and slot_mapping must be on the same device");
  TORCH_CHECK(head_dim % quant_block_size == 0,
              "head_dim must be divisible by quant_block_size");

  constexpr int vec_size = 4;
  dim3 grid(num_tokens, (head_dim + quant_block_size * vec_size - 1) /
                            (quant_block_size * vec_size));
  dim3 block(32, vec_size);
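  // grid.y covers the head dimension in chunks of quant_block_size * vec_size
  // elements; e.g. head_dim = 512 with quant_block_size = 128 and vec_size = 4
  // gives grid.y = 1 (illustrative numbers only).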
  const at::cuda::OptionalCUDAGuard device_guard(device_of(k));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  DISPATCH_BY_KV_CACHE_DTYPE(k.dtype(), "fp8_e4m3",
                             CALL_INDEXER_K_QUANT_AND_CACHE);
}

// Macro to dispatch the kernel based on the data amount.
#define CALL_CP_GATHER_INDEXER_K_QUANT_CACHE(BLOCK_Y_SIZE)                   \
  vllm::cp_gather_indexer_k_quant_cache_kernel<BLOCK_Y_SIZE>                 \
      <<<dim3((num_tokens + BLOCK_Y_SIZE - 1) / BLOCK_Y_SIZE,                \
              (head_dim + 8 * vec_size - 1) / (8 * vec_size)),               \
         dim3(8, BLOCK_Y_SIZE), 0, stream>>>(                                \
          reinterpret_cast<char*>(kv_cache.data_ptr()),                      \
          reinterpret_cast<char*>(dst_k.data_ptr()),                         \
          reinterpret_cast<char*>(dst_scale.data_ptr()),                     \
          block_table.data_ptr<int32_t>(), cu_seq_lens.data_ptr<int32_t>(),  \
          batch_size, dst_k.stride(0), dst_k.size(1), kv_cache.stride(0),    \
          kv_cache.stride(1), kv_cache.size(1), block_table.size(1),         \
          num_tokens, quant_block_size);

void cp_gather_indexer_k_quant_cache(
    const torch::Tensor& kv_cache,  // [num_blocks, block_size, cache_stride]
    torch::Tensor& dst_k,           // [num_tokens, head_dim]
    torch::Tensor& dst_scale,  // [num_tokens, head_dim / quant_block_size * 4]
    const torch::Tensor& block_table,  // [batch_size, num_blocks]
    const torch::Tensor& cu_seq_lens   // [batch_size + 1]
) {
  int batch_size = block_table.size(0);
  int num_tokens = dst_k.size(0);
  int head_dim = dst_k.size(1);
  int quant_block_size = head_dim * 4 / dst_scale.size(1);
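  // dst_scale is viewed as raw bytes and appears to hold one 4-byte fp32 scale
  // per quant_block_size group of head_dim, which is where the factor of 4
  // above comes from.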

  TORCH_CHECK(kv_cache.device() == dst_k.device(),
              "kv_cache and dst_k must be on the same device");
  TORCH_CHECK(kv_cache.device() == dst_scale.device(),
              "kv_cache and dst_scale must be on the same device");
  TORCH_CHECK(kv_cache.device() == block_table.device(),
              "kv_cache and block_table must be on the same device");
  TORCH_CHECK(kv_cache.device() == cu_seq_lens.device(),
              "kv_cache and cu_seq_lens must be on the same device");
  TORCH_CHECK(head_dim % quant_block_size == 0,
              "head_dim must be divisible by quant_block_size");

  constexpr int vec_size = 16;
  const at::cuda::OptionalCUDAGuard device_guard(device_of(kv_cache));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

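  // Choose how many tokens each thread block handles (the y-dimension of the
  // dim3(8, BLOCK_Y_SIZE) block in the macro above): larger token counts get
  // taller blocks, while small gathers keep BLOCK_Y_SIZE low so the grid still
  // contains a reasonable number of blocks.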
  if (num_tokens < 32) {
    CALL_CP_GATHER_INDEXER_K_QUANT_CACHE(1);
  } else if (num_tokens < 64) {
    CALL_CP_GATHER_INDEXER_K_QUANT_CACHE(2);
  } else if (num_tokens < 128) {
    CALL_CP_GATHER_INDEXER_K_QUANT_CACHE(4);
  } else if (num_tokens < 256) {
    CALL_CP_GATHER_INDEXER_K_QUANT_CACHE(8);
  } else if (num_tokens < 512) {
    CALL_CP_GATHER_INDEXER_K_QUANT_CACHE(16);
  } else {
    CALL_CP_GATHER_INDEXER_K_QUANT_CACHE(32);
  }
}