mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 14:53:52 +08:00
11 lines
429 B
YAML
# For hf script, without -t option (tensor parallel size).
# bash .buildkite/lm-eval-harness/run-lm-eval-mmlupro-vllm-baseline.sh -m meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8 -l 250 -t 8 -f 5
model_name: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
tasks:
- name: "mmlu_pro"
  metrics:
  - name: "exact_match,custom-extract"
    value: 0.80
limit: 250 # will run on 250 * 14 subjects = 3500 samples
num_fewshot: 5