Fix bad lm-eval fork (#21318)

commit 005ae9be6c
parent 29d1ffc5b4
Author: Michael Goin
Committed by: GitHub
Date: 2025-07-21 13:47:51 -04:00


@@ -273,7 +273,7 @@ steps:
   # VLLM_USE_FLASHINFER_SAMPLER or not on H100.
   - pytest -v -s v1/e2e
   # Integration test for streaming correctness (requires special branch).
-  - pip install -U git+https://github.com/robertgshaw2-neuralmagic/lm-evaluation-harness.git@streaming-api
+  - pip install -U git+https://github.com/robertgshaw2-redhat/lm-evaluation-harness.git@streaming-api
   - pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine
 - label: Examples Test # 25min
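
The same check can be reproduced outside CI. A minimal sketch, assuming a vLLM checkout with the working directory set to its tests/ folder and a running environment that the correctness test expects:

# Install the lm-evaluation-harness streaming branch from the updated fork (URL taken from the diff above).
pip install -U git+https://github.com/robertgshaw2-redhat/lm-evaluation-harness.git@streaming-api

# Run the streaming-correctness integration test; assumes it is invoked from the tests/ directory of a vLLM checkout.
pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine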