Mirror of https://github.com/vllm-project/vllm.git (synced 2025-10-20 14:53:52 +08:00)
[CI/Build] Replace math.isclose with pytest.approx (#18703)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
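For context, a minimal sketch of the two assertion styles this commit swaps, using made-up scores and a hypothetical test function rather than code from the test suite: math.isclose measures the relative tolerance against the larger of the two operands, while pytest.approx measures it against the value passed to approx() (combined with a tiny default absolute tolerance of 1e-12) and prints the expected value and the allowed range when the assertion fails.

```python
# Illustrative sketch only -- not part of the diff; the scores are invented.
import math

import pytest


def test_relative_tolerance_styles():
    st_main_score = 0.78700
    vllm_main_score = 0.78703

    # Old style: relative tolerance is taken against max(|a|, |b|).
    assert math.isclose(st_main_score, vllm_main_score, rel_tol=1e-4)

    # New style: relative tolerance is taken against the argument of approx();
    # on failure, pytest reports the expected value and the allowed range.
    assert st_main_score == pytest.approx(vllm_main_score, rel=1e-4)
```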
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: Apache-2.0
-import math
 import os
 
 import pytest
@@ -39,4 +38,4 @@ def test_mteb(server):
     print("SentenceTransformer main score: ", st_main_score)
     print("Difference: ", st_main_score - vllm_main_score)
 
-    assert math.isclose(st_main_score, vllm_main_score, rel_tol=1e-4)
+    assert st_main_score == pytest.approx(vllm_main_score, rel=1e-4)
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: Apache-2.0
-
-import math
 from typing import Any
 
 import pytest
@@ -92,7 +90,7 @@ class TestModel:
         hf_outputs = run_transformers(runner, model, text_pairs)
 
         for i in range(len(vllm_outputs)):
-            assert math.isclose(hf_outputs[i], vllm_outputs[i], rel_tol=0.01)
+            assert hf_outputs[i] == pytest.approx(vllm_outputs[i], rel=0.01)
 
     def test_text_1_list_text_2_list(self, server: RemoteOpenAIServer,
                                      model: dict[str, Any], runner):
@@ -124,7 +122,7 @@ class TestModel:
         hf_outputs = run_transformers(runner, model, text_pairs)
 
         for i in range(len(vllm_outputs)):
-            assert math.isclose(hf_outputs[i], vllm_outputs[i], rel_tol=0.01)
+            assert hf_outputs[i] == pytest.approx(vllm_outputs[i], rel=0.01)
 
     def test_text_1_str_text_2_str(self, server: RemoteOpenAIServer,
                                    model: dict[str, Any], runner):
@@ -150,7 +148,7 @@ class TestModel:
         hf_outputs = run_transformers(runner, model, text_pairs)
 
         for i in range(len(vllm_outputs)):
-            assert math.isclose(hf_outputs[i], vllm_outputs[i], rel_tol=0.01)
+            assert hf_outputs[i] == pytest.approx(vllm_outputs[i], rel=0.01)
 
     def test_score_max_model_len(self, server: RemoteOpenAIServer,
                                  model: dict[str, Any]):
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: Apache-2.0
-import math
 from collections.abc import Sequence
 
 import mteb
@@ -115,4 +114,4 @@ def mteb_test_embed_models(hf_runner,
     print("SentenceTransformer:", model_dtype, st_main_score)
     print("Difference:", st_main_score - vllm_main_score)
 
-    assert math.isclose(st_main_score, vllm_main_score, rel_tol=MTEB_EMBED_TOL)
+    assert st_main_score == pytest.approx(vllm_main_score, rel=MTEB_EMBED_TOL)
@@ -2,7 +2,6 @@
 from __future__ import annotations
 
 import importlib.util
-import math
 from array import array
 
 import openai
@@ -104,16 +103,16 @@ def get_test_data():
 
 def validate_embed_output(q_rep: list[list[float]], d_rep: list[list[float]]):
     cosine_sim_q0_d0 = 1 - cosine(q_rep[0], d_rep[0])
-    assert math.isclose(cosine_sim_q0_d0, 0.609, abs_tol=0.001)
+    assert cosine_sim_q0_d0 == pytest.approx(0.609, abs=0.001)
 
     cosine_sim_q0_d1 = 1 - cosine(q_rep[0], d_rep[1])
-    assert math.isclose(cosine_sim_q0_d1, 0.101, abs_tol=0.001)
+    assert cosine_sim_q0_d1 == pytest.approx(0.101, abs=0.001)
 
     cosine_sim_q1_d0 = 1 - cosine(q_rep[1], d_rep[0])
-    assert math.isclose(cosine_sim_q1_d0, 0.120, abs_tol=0.001)
+    assert cosine_sim_q1_d0 == pytest.approx(0.120, abs=0.001)
 
     cosine_sim_q1_d1 = 1 - cosine(q_rep[1], d_rep[1])
-    assert math.isclose(cosine_sim_q1_d1, 0.534, abs_tol=0.001)
+    assert cosine_sim_q1_d1 == pytest.approx(0.534, abs=0.001)
 
 
 def test_gritlm_offline_embedding(vllm_runner):
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: Apache-2.0
-import math
-
 import pytest
 
 from vllm import PoolingParams
@@ -60,7 +58,7 @@ def test_llm_1_to_1(vllm_runner, hf_runner, model_name, dtype: str):
     assert len(vllm_outputs) == 1
     assert len(hf_outputs) == 1
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
 
 
 @pytest.mark.parametrize("dtype", ["half"])
@@ -78,8 +76,8 @@ def test_llm_1_to_N(vllm_runner, hf_runner, model_name, dtype: str):
     assert len(vllm_outputs) == 10
     assert len(hf_outputs) == 10
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
-    assert math.isclose(hf_outputs[1], vllm_outputs[1], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
+    assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)
 
 
 @pytest.fixture(scope="module", params=EMBEDDING_MODELS)
@@ -1,6 +1,4 @@
 # SPDX-License-Identifier: Apache-2.0
-import math
-
 import pytest
 import torch
 import torch.nn.functional as F
@@ -45,7 +43,7 @@ def test_cross_encoder_1_to_1(vllm_runner, hf_runner, model_name):
     assert len(vllm_outputs) == 1
     assert len(hf_outputs) == 1
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
 
 
 def test_cross_encoder_1_to_N(vllm_runner, hf_runner, model_name):
@@ -64,8 +62,8 @@ def test_cross_encoder_1_to_N(vllm_runner, hf_runner, model_name):
     assert len(vllm_outputs) == 2
     assert len(hf_outputs) == 2
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
-    assert math.isclose(hf_outputs[1], vllm_outputs[1], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
+    assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)
 
 
 def test_cross_encoder_N_to_N(vllm_runner, hf_runner, model_name):
@@ -84,8 +82,8 @@ def test_cross_encoder_N_to_N(vllm_runner, hf_runner, model_name):
     assert len(vllm_outputs) == 2
     assert len(hf_outputs) == 2
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
-    assert math.isclose(hf_outputs[1], vllm_outputs[1], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
+    assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)
 
 
 @pytest.fixture(scope="module", params=EMBEDDING_MODELS)
@@ -112,7 +110,7 @@ def test_embedding_1_to_1(vllm_runner, hf_runner, emb_model_name):
     assert len(vllm_outputs) == 1
     assert len(hf_outputs) == 1
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
 
 
 def test_embedding_1_to_N(vllm_runner, hf_runner, emb_model_name):
@@ -140,8 +138,8 @@ def test_embedding_1_to_N(vllm_runner, hf_runner, emb_model_name):
     assert len(vllm_outputs) == 2
     assert len(hf_outputs) == 2
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
-    assert math.isclose(hf_outputs[1], vllm_outputs[1], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
+    assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)
 
 
 def test_embedding_N_to_N(vllm_runner, hf_runner, emb_model_name):
@@ -169,5 +167,5 @@ def test_embedding_N_to_N(vllm_runner, hf_runner, emb_model_name):
     assert len(vllm_outputs) == 2
     assert len(hf_outputs) == 2
 
-    assert math.isclose(hf_outputs[0], vllm_outputs[0], rel_tol=0.01)
-    assert math.isclose(hf_outputs[1], vllm_outputs[1], rel_tol=0.01)
+    assert hf_outputs[0] == pytest.approx(vllm_outputs[0], rel=0.01)
+    assert hf_outputs[1] == pytest.approx(vllm_outputs[1], rel=0.01)
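The validate_embed_output hunk above also swaps the absolute-tolerance form (abs_tol= to abs=). A minimal sketch of that variant, again with an invented similarity value and a hypothetical test name:

```python
# Illustrative sketch only -- the cosine similarity below is invented.
import math

import pytest


def test_absolute_tolerance_styles():
    cosine_sim = 0.6094

    # Old style: abs_tol bounds the absolute difference; the default
    # rel_tol of 1e-9 still applies but is negligible at this magnitude.
    assert math.isclose(cosine_sim, 0.609, abs_tol=0.001)

    # New style: with only abs= given, pytest.approx compares purely on
    # absolute difference, i.e. |actual - expected| <= 0.001.
    assert cosine_sim == pytest.approx(0.609, abs=0.001)
```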