Mirror of https://github.com/vllm-project/vllm.git, synced 2025-10-20 14:53:52 +08:00

Commit: [Docs] Fix warnings in mkdocs build (continued) (#25163)
Signed-off-by: Zerohertz <ohg3417@gmail.com>
This commit is contained in:
@@ -253,7 +253,7 @@ class SingleWriterShmRingBuffer:
         Args:
             nbytes (int, optional): The size of the buffer to free. If None,
-                frees the maximum size of the ring buffer.
+                frees the maximum size of the ring buffer.
         '''

         assert self.is_writer, "Only the writer can free buffers."
@@ -697,9 +697,7 @@ class OpenAIServing:
         add_special_tokens: bool = True,
     ) -> TextTokensPrompt:
         """
-        A simpler implementation of
-        [`_tokenize_prompt_input_or_inputs`][vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs]
-        that assumes single input.
+        A simpler implementation that tokenizes a single prompt input.
         """
         async for result in self._tokenize_prompt_inputs_async(
             request,
@@ -718,9 +716,7 @@ class OpenAIServing:
         add_special_tokens: bool = True,
     ) -> AsyncGenerator[TextTokensPrompt, None]:
         """
-        A simpler implementation of
-        [`_tokenize_prompt_input_or_inputs`][vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs]
-        that assumes multiple inputs.
+        A simpler implementation that tokenizes multiple prompt inputs.
         """
         for prompt in prompt_inputs:
             if isinstance(prompt, str):
Reference in New Issue
Block a user