#pragma once

#include <torch/all.h>

#include <map>
#include <optional>
#include <string>
#include <vector>
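
// Swap whole cache blocks between src and dst (e.g. when moving blocks
// between GPU and CPU caches). Each row of block_mapping is expected to be
// a (src_block, dst_block) index pair.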
void swap_blocks(torch::Tensor& src, torch::Tensor& dst,
                 const torch::Tensor& block_mapping);
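
// Copy whole blocks within every layer's key/value caches (e.g. for
// copy-on-write of blocks shared across sequences). Each row of
// block_mapping is a (src_block, dst_block) index pair.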
// Note: the key_caches and value_caches vectors are constant but
// not the Tensors they contain. The vectors need to be const refs
// in order to satisfy PyTorch's C++ operator registration code.
void copy_blocks(std::vector<torch::Tensor> const& key_caches,
                 std::vector<torch::Tensor> const& value_caches,
                 const torch::Tensor& block_mapping);
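
// Variant of copy_blocks for MLA models, where each layer holds a single
// combined latent KV-cache tensor instead of separate key/value caches.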
void copy_blocks_mla(std::vector<torch::Tensor> const& kv_caches,
                     const torch::Tensor& block_mapping);
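
// Scatter the new key/value tokens into the paged caches at the positions
// given by slot_mapping. kv_cache_dtype selects the storage format (e.g. an
// FP8 variant), with k_scale/v_scale as the quantization scales.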
void reshape_and_cache(torch::Tensor& key, torch::Tensor& value,
                       torch::Tensor& key_cache, torch::Tensor& value_cache,
                       torch::Tensor& slot_mapping,
                       const std::string& kv_cache_dtype,
                       torch::Tensor& k_scale, torch::Tensor& v_scale);
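
// Same as reshape_and_cache, but for caches in the
// [num_blocks, block_size, num_heads, head_size] layout used by
// FlashAttention-style kernels.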
void reshape_and_cache_flash(torch::Tensor& key, torch::Tensor& value,
                             torch::Tensor& key_cache,
                             torch::Tensor& value_cache,
                             torch::Tensor& slot_mapping,
                             const std::string& kv_cache_dtype,
                             torch::Tensor& k_scale, torch::Tensor& v_scale);
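
// For MLA: concatenate the compressed latent KV (kv_c) with the decoupled
// rotary key (k_pe) and write the result into kv_cache at the positions
// given by slot_mapping. A single scale suffices since one cache holds both.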
void concat_and_cache_mla(torch::Tensor& kv_c, torch::Tensor& k_pe,
                          torch::Tensor& kv_cache, torch::Tensor& slot_mapping,
                          const std::string& kv_cache_dtype,
                          torch::Tensor& scale);
// Only used by unit tests: convert src_cache into dst_cache to or from the
// FP8 format selected by kv_cache_dtype, applying the given scale.
void convert_fp8(torch::Tensor& dst_cache, torch::Tensor& src_cache,
                 const double scale, const std::string& kv_cache_dtype);
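
// Gather the blocks listed in block_table into the contiguous dst buffer,
// dequantizing on the fly when the cache is stored in a quantized
// kv_cache_dtype. cu_seq_lens gives cumulative per-sequence token counts;
// seq_starts, if present, offsets where each sequence's gather begins.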
void gather_and_maybe_dequant_cache(
    torch::Tensor const& src_cache,    // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
    torch::Tensor const& dst,          // [TOT_TOKENS, ENTRIES...]
    torch::Tensor const& block_table,  // [BATCH, BLOCK_INDICES]
    torch::Tensor const& cu_seq_lens,  // [BATCH+1]
    int64_t batch_size, const std::string& kv_cache_dtype,
    torch::Tensor const& scale,
    std::optional<torch::Tensor> seq_starts = std::nullopt);
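
// Like gather_and_maybe_dequant_cache, but without dequantization support.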
// TODO(hc): cp_gather_cache needs to support a scaled KV cache in the future.
void cp_gather_cache(
    torch::Tensor const& src_cache,    // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
    torch::Tensor const& dst,          // [TOT_TOKENS, ENTRIES...]
    torch::Tensor const& block_table,  // [BATCH, BLOCK_INDICES]
    torch::Tensor const& cu_seq_lens,  // [BATCH+1]
    int64_t batch_size, std::optional<torch::Tensor> seq_starts = std::nullopt);
// Quantize the indexer K tensor in groups of quant_block_size elements and
// store the quantized values, together with their per-group scales, in the
// paged kv_cache; scale_fmt selects the scale encoding.
void indexer_k_quant_and_cache(
    torch::Tensor& k,             // [num_tokens, head_dim]
    torch::Tensor& kv_cache,      // [num_blocks, block_size, cache_stride]
    torch::Tensor& slot_mapping,  // [num_tokens]
    int64_t quant_block_size,     // quantization block size
    const std::string& scale_fmt);
// Gather the quantized indexer K values and their per-group scales from the
// paged cache into contiguous per-token output tensors.
void cp_gather_indexer_k_quant_cache(
    const torch::Tensor& kv_cache,  // [num_blocks, block_size, cache_stride]
    torch::Tensor& dst_k,           // [num_tokens, head_dim]
    torch::Tensor& dst_scale,  // [num_tokens, head_dim / quant_block_size * 4]
    const torch::Tensor& block_table,   // [batch_size, num_blocks]
    const torch::Tensor& cu_seq_lens);  // [batch_size + 1]
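
// Usage sketch (not part of the original header): a minimal swap_blocks call
// from C++, assuming the kernels above are linked in. The shapes and the
// int64 (src_block, dst_block) layout of block_mapping are illustrative
// assumptions, not a definitive reference.
//
//   void example_swap() {
//     // A toy cache: 8 blocks of 16 slots, 8 heads, head size 128.
//     torch::Tensor src =
//         torch::rand({8, 16, 8, 128}, torch::device(torch::kCUDA));
//     torch::Tensor dst = torch::zeros({8, 16, 8, 128});  // CPU-side cache
//     // Copy GPU block 0 into CPU block 0, and GPU block 3 into CPU block 1.
//     torch::Tensor block_mapping =
//         torch::tensor({{0, 0}, {3, 1}}, torch::kInt64);
//     swap_blocks(src, dst, block_mapping);
//   }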