mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 14:53:52 +08:00
Fix precommit fail in fused_moe intermediate_cache2 chunking (#13772)
Signed-off-by: mgoin <mgoin64@gmail.com>
This commit is contained in:
@@ -1271,7 +1271,8 @@ def fused_experts_impl(hidden_states: torch.Tensor,
             # so the cache size and config are already set correctly and
             # do not need to be adjusted.
             intermediate_cache1 = intermediate_cache1[:tokens_in_chunk]
-            intermediate_cache2 = intermediate_cache2[:tokens_in_chunk * topk_ids.shape[1]]
+            intermediate_cache2 = intermediate_cache2[:tokens_in_chunk *
+                                                      topk_ids.shape[1]]
             intermediate_cache3 = intermediate_cache3[:tokens_in_chunk]
             config = get_config_func(tokens_in_chunk)
Reference in New Issue
Block a user