Mirror of https://github.com/vllm-project/vllm.git, synced 2025-10-20 23:03:52 +08:00
Compare commits: il_tool...v0.2.1.pos (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 3d40c834f0 | |
| | d0fb047de3 | |
```diff
@@ -8,7 +8,7 @@ from vllm.entrypoints.llm import LLM
 from vllm.outputs import CompletionOutput, RequestOutput
 from vllm.sampling_params import SamplingParams
 
-__version__ = "0.2.1"
+__version__ = "0.2.1.post1"
 
 __all__ = [
     "LLM",
```
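The only change in this hunk is the package version string, bumped to the post-release `0.2.1.post1`. As a quick sanity check, a minimal sketch of how the bump surfaces to users after installing this tag:

```python
# Minimal sketch: the package-level __version__ set in the diff above should
# report the post-release once this tag is installed.
import vllm

assert vllm.__version__ == "0.2.1.post1"
print(vllm.__version__)  # 0.2.1.post1
```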
```diff
@@ -401,6 +401,12 @@ class SequenceGroupOutputs:
         return (f"SequenceGroupOutputs(samples={self.samples}, "
                 f"prompt_logprobs={self.prompt_logprobs})")
 
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, SequenceGroupOutputs):
+            raise NotImplementedError()
+        return (self.samples == other.samples
+                and self.prompt_logprobs == other.prompt_logprobs)
+
 
 # For each sequence group, we generate a list of SequenceOutputs object,
 # each of which contains one possible candidate for the next token.
```
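This hunk adds a field-wise `__eq__` to `SequenceGroupOutputs`, so two output groups compare equal when their `samples` and `prompt_logprobs` match, rather than only when they are the same object. A minimal sketch of the resulting behavior; the constructor signatures `SequenceGroupOutputs(samples, prompt_logprobs)` and `SequenceOutputs(parent_seq_id, output_token, logprobs)` are inferred from the surrounding code and should be treated as assumptions:

```python
# Sketch only: the constructor signatures used below are assumptions inferred
# from the surrounding code, not taken from this diff.
from vllm.sequence import SequenceGroupOutputs, SequenceOutputs

# One candidate next token for the sequence group (assumed SequenceOutputs signature).
sample = SequenceOutputs(parent_seq_id=0, output_token=42, logprobs={42: -0.1})

a = SequenceGroupOutputs(samples=[sample], prompt_logprobs=None)
b = SequenceGroupOutputs(samples=[sample], prompt_logprobs=None)

# Without the added __eq__, == falls back to object identity, so a == b is False;
# with it, groups whose samples and prompt_logprobs match compare equal.
assert a == b
```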