vllm-project/vllm (https://github.com/vllm-project/vllm.git)
[Misc] Minor fix in KVCache type (#3652)

docs/source/models/adding_model.rst
@@ -56,8 +56,8 @@ Next, you need to rewrite the :code:`forward` methods of your model by following
     -    return_dict: Optional[bool] = None,
     -) -> Union[Tuple, CausalLMOutputWithPast]:
     +    positions: torch.Tensor,
-    +    kv_caches: List[KVCache],
-    +    input_metadata: InputMetadata,
+    +    kv_caches: List[torch.Tensor],
+    +    attn_metadata: AttentionMetadata,
     +) -> Optional[SamplerOutput]:
 
 1. Update the code by considering that :code:`input_ids` and :code:`positions` are now flattened tensors.
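
Note: the documentation hunk above changes the example `forward` signature in the model-porting guide so that `kv_caches` is typed as `List[torch.Tensor]` and the metadata argument becomes `AttentionMetadata`. A minimal sketch of the resulting signature is below, assuming vLLM is installed; the import paths and the `MyModelForCausalLM` class name are assumptions for illustration, not part of this commit.

    from typing import List, Optional

    import torch
    from torch import nn

    # Import paths assumed from the repository layout at the time of this commit.
    from vllm.attention import AttentionMetadata
    from vllm.sequence import SamplerOutput


    class MyModelForCausalLM(nn.Module):  # hypothetical model being ported
        def forward(
            self,
            input_ids: torch.Tensor,        # flattened token ids for the whole batch
            positions: torch.Tensor,        # flattened positions, same shape as input_ids
            kv_caches: List[torch.Tensor],  # one cache tensor per attention layer
            attn_metadata: AttentionMetadata,
        ) -> Optional[SamplerOutput]:
            ...

As the guide notes, `input_ids` and `positions` arrive as flattened tensors rather than batched, padded ones.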

vllm/model_executor/models/llava.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, Tuple
+from typing import List, Optional
 
 import torch
 from torch import nn
@@ -19,8 +19,6 @@ from vllm.model_executor.weight_utils import (default_weight_loader,
                                               hf_model_weights_iterator)
 from vllm.sequence import SamplerOutput
 
-KVCache = Tuple[torch.Tensor, torch.Tensor]
-
 _KEYS_TO_MODIFY_MAPPING = {
     "language_model.lm_head": "lm_head",
     "language_model.model": "language_model",
@@ -102,7 +100,7 @@ class LlavaForConditionalGeneration(nn.Module):
         self,
         input_ids: torch.Tensor,
         positions: torch.Tensor,
-        kv_caches: List[KVCache],
+        kv_caches: List[torch.Tensor],
         attn_metadata: AttentionMetadata,
         image_input: Optional[torch.Tensor] = None
     ) -> SamplerOutput:  # noqa: E501
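
Note: in the LLaVA model file the fix removes the stale module-local alias `KVCache = Tuple[torch.Tensor, torch.Tensor]` and annotates `kv_caches` directly as `List[torch.Tensor]`, i.e. one cache tensor per attention layer rather than a (key, value) tuple. A before/after sketch of just the annotation; the function names are hypothetical and the bodies are stubs:

    from typing import List, Tuple

    import torch

    # Old, now-removed module-local alias: each layer's cache was annotated as a
    # (key_cache, value_cache) pair of tensors.
    KVCache = Tuple[torch.Tensor, torch.Tensor]


    def forward_old(kv_caches: List[KVCache]) -> None:
        ...


    # New annotation: one tensor per layer; how key and value blocks are laid out
    # inside that tensor is an implementation detail of the attention backend.
    def forward_new(kv_caches: List[torch.Tensor]) -> None:
        ...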

vllm/worker/neuron_model_runner.py
@@ -14,8 +14,6 @@ from vllm.utils import (async_tensor_h2d, is_pin_memory_available,
 
 logger = init_logger(__name__)
 
-KVCache = Tuple[torch.Tensor, torch.Tensor]
-
 
 class NeuronModelRunner:
 
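
Note: the Neuron model runner hunk is the same cleanup, deleting an alias that is no longer referenced in that file. A hypothetical caller-side sketch of how a per-layer cache list is typed after this change; the layer count and tensor shape are placeholders, not the engine's real cache layout:

    from typing import List

    import torch

    num_layers = 4  # placeholder; a real model has one cache entry per attention layer
    kv_caches: List[torch.Tensor] = [
        torch.zeros(2, 8, 16)  # placeholder shape, not the engine's real cache layout
        for _ in range(num_layers)
    ]


    def count_layers(kv_caches: List[torch.Tensor]) -> int:
        # Stand-in for a forward() that receives the per-layer cache list.
        return len(kv_caches)


    assert count_layers(kv_caches) == num_layers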