Mirror of https://github.com/vllm-project/vllm.git (synced 2025-10-20 23:03:52 +08:00)
Compare commits: v0.10.2rc3...benchmark (1 commit)

Commit b6381ced9c
@@ -30,11 +30,17 @@ from datasets import load_dataset
 from PIL import Image
 from transformers import PreTrainedTokenizerBase
 
-from vllm.lora.request import LoRARequest
-from vllm.lora.utils import get_adapter_absolute_path
-from vllm.multimodal import MultiModalDataDict
-from vllm.multimodal.image import convert_image_mode
-from vllm.transformers_utils.tokenizer import AnyTokenizer, get_lora_tokenizer
+try:
+    from vllm.lora.request import LoRARequest
+    from vllm.lora.utils import get_adapter_absolute_path
+    from vllm.multimodal import MultiModalDataDict
+    from vllm.multimodal.image import convert_image_mode
+    from vllm.transformers_utils.tokenizer import AnyTokenizer, get_lora_tokenizer
+except:
+    MultiModalDataDict = None
+    AnyTokenizer = None
+    LoRARequest = None
+    print("Install vLLM to use LoRA or Multimodal benchmarking.")
 
 logger = logging.getLogger(__name__)
 
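The hunk above wraps the vLLM imports in a try/except so the benchmark module can still be imported when vLLM is not installed, binding the missing names to None sentinels instead. Below is a minimal, self-contained sketch of how code built on this pattern might check those sentinels before using LoRA features; the helper name maybe_build_lora_request and the exact LoRARequest arguments are illustrative assumptions, not part of the commit.

# Illustrative sketch of the optional-import pattern from the diff above:
# fall back to None sentinels when vLLM is absent, then check the sentinel
# before using LoRA functionality. The helper name is hypothetical.
try:
    from vllm.lora.request import LoRARequest
    from vllm.lora.utils import get_adapter_absolute_path
except ImportError:
    LoRARequest = None
    get_adapter_absolute_path = None


def maybe_build_lora_request(lora_path):
    """Return a LoRARequest when vLLM is available, otherwise fail with a clear error."""
    if lora_path is None:
        # No LoRA adapter requested; nothing to build.
        return None
    if LoRARequest is None:
        # vLLM is not installed; the import fallback set the symbol to None.
        raise RuntimeError("Install vLLM to use LoRA benchmarking.")
    # The keyword arguments mirror common LoRARequest usage; treat the exact
    # constructor call here as an assumption, not the commit's own code.
    return LoRARequest(
        lora_name=lora_path,
        lora_int_id=1,
        lora_path=get_adapter_absolute_path(lora_path),
    )

Checking the sentinel at call time keeps import-time failures out of the benchmark script while still surfacing a clear error only when an optional feature is actually requested.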