Mirror of https://github.com/vllm-project/vllm.git (synced 2025-11-17 16:33:17 +08:00)

Compare commits: v0.11.1rc5...add-symm-m (252 commits)
| SHA1 | Author | Date |
|---|---|---|
| 8a8eed2a7b | |||
| 9c84ca8293 | |||
| 6d54336ae5 | |||
| 34553b9d27 | |||
| b039bfda8f | |||
| d0e186c16f | |||
| f080a83511 | |||
| 40e2eeeb92 | |||
| b06b9470ca | |||
| 4673e465ff | |||
| 912744d066 | |||
| 15be507c86 | |||
| 6f7de33bed | |||
| a98cc35c34 | |||
| e8697faf03 | |||
| 03fa4d3fb3 | |||
| 6b2b9fd934 | |||
| c5f685b3ae | |||
| c4768dcf47 | |||
| a65a934ebe | |||
| 4a8d6bd168 | |||
| 636efd10a5 | |||
| 289eb6c537 | |||
| 19d91ece4b | |||
| 7ae5a5fb11 | |||
| de2b78305f | |||
| e5e9067e61 | |||
| 3a7d580343 | |||
| 05f8d69077 | |||
| 404d7a9d14 | |||
| 171133f929 | |||
| 32787d0644 | |||
| 975676d174 | |||
| 77d702a22b | |||
| 2108a571d7 | |||
| 47604137a2 | |||
| 26990d25dc | |||
| d9ab1ad9d1 | |||
| 608bb14462 | |||
| 4a36681f85 | |||
| d15afc1fd0 | |||
| 934a9c3b79 | |||
| 70af44fd10 | |||
| 781f5ebf52 | |||
| 0852527647 | |||
| 61d25dc44b | |||
| d0c7792004 | |||
| b158df2813 | |||
| 1aaecda078 | |||
| 811df41ee9 | |||
| 67a2da890e | |||
| da786e339e | |||
| 18903216f5 | |||
| d0ceb38ae8 | |||
| 155ad56d7b | |||
| 5fb4137c99 | |||
| 68a72a5cc1 | |||
| 0f872b7977 | |||
| 4b1ff13221 | |||
| e0d6b4a867 | |||
| 72b1c2ae2c | |||
| e0919f331d | |||
| 8e19d470af | |||
| 1958bda9b4 | |||
| 7bdb42b2f2 | |||
| 315068eb4a | |||
| ccd98b59c1 | |||
| 21b82f4ea2 | |||
| a736e5ff77 | |||
| 9da9208b20 | |||
| 11fd69dd54 | |||
| c0a4b95d64 | |||
| a47d94f18c | |||
| e70fbc599b | |||
| 4bf56c79cc | |||
| 59b453eaa2 | |||
| 827e4237bc | |||
| ca6f755d24 | |||
| ca90f50304 | |||
| da855b42d2 | |||
| 449de9001a | |||
| d4aa65c998 | |||
| 7a8375f8a0 | |||
| 5e0c1fe69c | |||
| 4507a6dae4 | |||
| d1dd5f53e4 | |||
| e52e4da971 | |||
| 2176778cd3 | |||
| 0370679ce9 | |||
| 8816e375d3 | |||
| f32229293e | |||
| c757a15f0f | |||
| 59a50afa08 | |||
| 981cadb35c | |||
| c3ee80a01a | |||
| 3755c14532 | |||
| 201dc98acc | |||
| a404e2c0f1 | |||
| e31946f86e | |||
| bde5039325 | |||
| d72299d47b | |||
| 80679f108f | |||
| 43ecd0a900 | |||
| 07d614511f | |||
| f948ab6945 | |||
| d71af5f502 | |||
| 90189c71a9 | |||
| d79d9f0780 | |||
| b6a248bdd7 | |||
| 1767658559 | |||
| efe73e9b57 | |||
| 0b8e871e5e | |||
| 5ee93a5956 | |||
| e15601789b | |||
| 65ac8d8dc4 | |||
| ffb08379d8 | |||
| e04492449e | |||
| 518ec6b722 | |||
| 802748bddb | |||
| faedbb4d4f | |||
| 40db194446 | |||
| c765f0b443 | |||
| 002b07c4b2 | |||
| 752ddeacaa | |||
| c18f88c6ca | |||
| 6fd0df8132 | |||
| 3f5a4b6473 | |||
| 6cae1e5332 | |||
| 80c9275348 | |||
| e50c454672 | |||
| 5d16d0fa62 | |||
| 0606bea2b6 | |||
| 6e97eccf5d | |||
| 6ab183813c | |||
| 6b7a81185d | |||
| b57789b62b | |||
| 377061d481 | |||
| 86dca07d9b | |||
| 16b37f3119 | |||
| 0976711f3b | |||
| e261d37c9a | |||
| b7cbc25416 | |||
| d43ad5a757 | |||
| 0ff05e3770 | |||
| 428bc7bf1c | |||
| 878fd5a16f | |||
| 18b39828d9 | |||
| 4ea62b77f5 | |||
| d4e547bb7e | |||
| 2d977a7a9e | |||
| 1fb4217a05 | |||
| 611c86ea3c | |||
| dc937175d4 | |||
| 2f1cc8cef1 | |||
| 938a81692e | |||
| c9f66da8fd | |||
| 05cae69f0f | |||
| 5fd8f02ea9 | |||
| 97e3dda84b | |||
| 5a0a6dfd55 | |||
| 938772af03 | |||
| e4ee658672 | |||
| 77f8001f53 | |||
| 300a265978 | |||
| 03c4c4aa9d | |||
| 2ec401bc39 | |||
| 4022a9d279 | |||
| 53f6e81dfd | |||
| 43a6acfb7d | |||
| 58279c60b5 | |||
| 2f84ae1f27 | |||
| f32cbc9a0c | |||
| 7e4be74104 | |||
| 380ba6816d | |||
| 14a125a06d | |||
| c02fccdbd2 | |||
| 6ddae74054 | |||
| b13a447546 | |||
| 7956b0c0bc | |||
| 3758757377 | |||
| ccd3e55e51 | |||
| 01baefe674 | |||
| 786030721e | |||
| 145c00a4d3 | |||
| 55011aef24 | |||
| a4398fbb5e | |||
| 2c19d96777 | |||
| 4bc400f47e | |||
| cac4c10ef0 | |||
| f7d2946e99 | |||
| 294c805f1d | |||
| 40b69e33e7 | |||
| 32257297dd | |||
| ba464e6ae2 | |||
| 7f4bdadb92 | |||
| cec7c28833 | |||
| 18961c5ea6 | |||
| 470ad118b6 | |||
| 1bf43ae35d | |||
| 0ce743f4e1 | |||
| 6c317a656e | |||
| 00b31a36a2 | |||
| 73444b7b56 | |||
| 853a8eb53b | |||
| 758ea2e980 | |||
| 685c99ee77 | |||
| 1e88fb751b | |||
| c2ed069b32 | |||
| af6e19f50f | |||
| 99d69af9ec | |||
| d811b442d3 | |||
| 30a14b034f | |||
| 799ce45cc1 | |||
| 2c0c7c39bd | |||
| e675118849 | |||
| e2347dbf58 | |||
| 879a06579e | |||
| 29de3cdee4 | |||
| 7e2729b57e | |||
| 3a5de7d2d6 | |||
| bc4486d609 | |||
| 0cdbe7b744 | |||
| df334868ca | |||
| 0e0a638c3b | |||
| f29aeb5a25 | |||
| 5e8862e9e0 | |||
| 9e5bd3076e | |||
| fc16f1c477 | |||
| bc306fe5e9 | |||
| 103a468bbf | |||
| 70bfbd7b16 | |||
| d6517be3cd | |||
| 7e06c40e63 | |||
| 675704ac01 | |||
| 0384aa7150 | |||
| 3857eb8725 | |||
| 933cdea440 | |||
| 3933f18a5e | |||
| e5ef4dfc11 | |||
| 36960501d3 | |||
| b2e65cb4a7 | |||
| 2bf0bcc1fc | |||
| 697f507a8e | |||
| d5d2a0fe74 | |||
| c9791f1813 | |||
| e7acb20076 | |||
| 4b68c4a55b | |||
| a8141fa649 | |||
| 4917002523 | |||
| a2981c4272 | |||
| 4574d48bab | |||
| ab98f6556f |
@ -1,12 +0,0 @@
|
||||
# For vllm script, with -t option (tensor parallel size).
|
||||
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise -b "auto" -l 1000 -f 5 -t 1
|
||||
model_name: "nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise"
|
||||
tasks:
|
||||
- name: "gsm8k"
|
||||
metrics:
|
||||
- name: "exact_match,strict-match"
|
||||
value: 0.595
|
||||
- name: "exact_match,flexible-extract"
|
||||
value: 0.582
|
||||
limit: 1000
|
||||
num_fewshot: 5
|
||||
@@ -7,7 +7,7 @@ vLLM also maintains a continuous performance benchmark under [perf.vllm.ai](http

## Performance benchmark quick overview

**Benchmarking Coverage**: latency, throughput and fix-qps serving on B200, A100, H100 and Intel® Xeon® Processors, with different models.
**Benchmarking Coverage**: latency, throughput and fix-qps serving on B200, A100, H100, Intel® Xeon® Processors and Intel® Gaudi® 3 Accelerators with different models.

**Benchmarking Duration**: about 1hr.

@@ -34,6 +34,7 @@ Runtime environment variables:

See [performance-benchmarks-descriptions.md](performance-benchmarks-descriptions.md) for detailed descriptions, and use `tests/latency-tests.json`, `tests/throughput-tests.json`, `tests/serving-tests.json` to configure the test cases.
> NOTE: For Intel® Xeon® Processors, use `tests/latency-tests-cpu.json`, `tests/throughput-tests-cpu.json`, `tests/serving-tests-cpu.json` instead.
> For Intel® Gaudi® 3 Accelerators, use `tests/latency-tests-hpu.json`, `tests/throughput-tests-hpu.json`, `tests/serving-tests-hpu.json` instead.
>
### Latency test

@@ -5,7 +5,7 @@

- Input length: 32 tokens.
- Output length: 128 tokens.
- Batch size: fixed (8).
- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- CPU Models: llama-3.1 8B.
- Evaluation metrics: end-to-end latency (mean, median, p99).

@@ -16,7 +16,7 @@

- Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
- Output length: the corresponding output length of these 200 prompts.
- Batch size: dynamically determined by vllm to achieve maximum throughput.
- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- CPU Models: llama-3.1 8B.
- Evaluation metrics: throughput.

@@ -28,7 +28,7 @@

- Output length: the corresponding output length of these 200 prompts.
- Batch size: dynamically determined by vllm and the arrival pattern of the requests.
- **Average QPS (query per second)**: 1, 4, 16 and inf. QPS = inf means all requests come at once. For other QPS values, the arrival time of each query is determined using a random Poisson process (with fixed random seed).
- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- We also added a speculative decoding test for llama-3 70B on GPU, under QPS 2
- CPU Models: llama-3.1 8B.
- Evaluation metrics: throughput, TTFT (time to the first token, with mean, median and p99), ITL (inter-token latency, with mean, median and p99).
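The serving description above fixes the request arrival pattern with a seeded Poisson process. As a side illustration that is not code from this diff, here is a minimal sketch of how such arrival times can be drawn for a given average QPS; the function name and structure are assumptions for clarity only.

```python
import random

def poisson_arrival_times(num_requests: int, qps: float, seed: int = 0) -> list[float]:
    """Sketch: draw request arrival times for a target average QPS.

    Inter-arrival gaps of a Poisson process are exponentially distributed with
    mean 1/qps; qps == float("inf") models "all requests arrive at once".
    """
    if qps == float("inf"):
        return [0.0] * num_requests
    rng = random.Random(seed)  # fixed seed -> reproducible arrival pattern
    t, times = 0.0, []
    for _ in range(num_requests):
        t += rng.expovariate(qps)  # exponential gap with rate = qps
        times.append(t)
    return times

# Example: 200 ShareGPT prompts at an average of 4 QPS.
print(poisson_arrival_times(200, 4.0)[:5])
```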
@@ -15,6 +15,8 @@ check_gpus() {
    declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
  elif command -v amd-smi; then
    declare -g gpu_count=$(amd-smi list | grep 'GPU' | wc -l)
  elif command -v hl-smi; then
    declare -g gpu_count=$(hl-smi --list | grep -i "Module ID" | wc -l)
  fi

  if [[ $gpu_count -gt 0 ]]; then
@@ -23,10 +25,16 @@ check_gpus() {
    echo "Need at least 1 GPU to run benchmarking."
    exit 1
  fi

  declare -g arch_suffix=''

  if command -v nvidia-smi; then
    declare -g gpu_type=$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')
  elif command -v amd-smi; then
    declare -g gpu_type=$(amd-smi static -g 0 -a | grep 'MARKET_NAME' | awk '{print $2}')
  elif command -v hl-smi; then
    declare -g gpu_type=$(hl-smi -q | grep "Product Name" | head -n 1 | awk -F ':' '{print $2}' | sed 's/^ *//')
    arch_suffix='-hpu'
  fi
  echo "GPU type is $gpu_type"
}
@@ -138,6 +146,10 @@ kill_gpu_processes() {
    while [ "$(amd-smi metric -g 0 | grep 'USED_VRAM' | awk '{print $2}')" -ge 1000 ]; do
      sleep 1
    done
  elif command -v hl-smi; then
    while [ "$(hl-smi -q | grep "Used" | head -n 1 | awk '{print $3}')" -ge 1000 ]; do
      sleep 1
    done
  fi

  # remove vllm config file
@@ -451,6 +463,7 @@ main() {
    ARCH='-cpu'
  else
    check_gpus
    ARCH="$arch_suffix"
  fi
  check_hf_token
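The check_gpus() changes above select a test-file suffix based on which device-management CLI is present. A rough Python rendering of the same fallback order, offered only as a sketch (the real script stays in bash, and the CPU case is handled separately in main()):

```python
import shutil

def detect_arch_suffix() -> str:
    """Sketch mirroring the detection order in check_gpus() above."""
    if shutil.which("nvidia-smi") or shutil.which("amd-smi"):
        return ""      # NVIDIA/AMD runs keep the default test files
    if shutil.which("hl-smi"):
        return "-hpu"  # Gaudi runs pick the *-hpu.json test files
    return "-cpu"      # assumed CPU-only fallback; main() sets ARCH='-cpu' itself

print(detect_arch_suffix())
```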
@ -0,0 +1,55 @@
|
||||
[
|
||||
{
|
||||
"test_name": "latency_llama8B_tp1",
|
||||
"environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
|
||||
"tensor_parallel_size": 1,
|
||||
"load_format": "dummy",
|
||||
"num-iters-warmup": 5,
|
||||
"num-iters": 15,
|
||||
"max-model-len": 256,
|
||||
"async-scheduling": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "latency_llama70B_tp4",
|
||||
"environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
|
||||
"tensor_parallel_size": 4,
|
||||
"load_format": "dummy",
|
||||
"num-iters-warmup": 5,
|
||||
"num-iters": 15,
|
||||
"max-model-len": 256,
|
||||
"async-scheduling": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "latency_mixtral8x7B_tp2",
|
||||
"environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"tensor_parallel_size": 2,
|
||||
"load_format": "dummy",
|
||||
"num-iters-warmup": 5,
|
||||
"num-iters": 15,
|
||||
"max-model-len": 256,
|
||||
"async-scheduling": ""
|
||||
}
|
||||
}
|
||||
]
|
||||
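For orientation between the JSON blocks, here is a small sketch (not from the repo) of how a runner might turn one of the latency entries above into environment settings plus CLI-style flags. How the .buildkite scripts actually consume these fields is an assumption; the file path is taken from the note earlier in this diff.

```python
import json

# Illustrative only: read the HPU latency test definitions added by this diff.
with open("tests/latency-tests-hpu.json") as f:
    tests = json.load(f)

for test in tests:
    env = {k: str(v) for k, v in test.get("environment_variables", {}).items()}
    # Parameter keys look like CLI flags; an empty value means a bare flag.
    flags = " ".join(
        f"--{name.replace('_', '-')}" + (f" {value}" if value != "" else "")
        for name, value in test["parameters"].items()
    )
    print(test["test_name"], env, flags)
```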
@ -0,0 +1,82 @@
|
||||
[
|
||||
{
|
||||
"test_name": "serving_llama8B_tp1_sharegpt",
|
||||
"qps_list": [1, 4, 16, "inf"],
|
||||
"server_environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"server_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
|
||||
"tensor_parallel_size": 1,
|
||||
"swap_space": 16,
|
||||
"disable_log_stats": "",
|
||||
"load_format": "dummy",
|
||||
"max-model-len": 2048,
|
||||
"max-num-seqs": 256,
|
||||
"async-scheduling": ""
|
||||
},
|
||||
"client_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
|
||||
"backend": "vllm",
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "serving_llama70B_tp4_sharegpt",
|
||||
"qps_list": [1, 4, 16, "inf"],
|
||||
"server_environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"server_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
|
||||
"tensor_parallel_size": 4,
|
||||
"swap_space": 16,
|
||||
"disable_log_stats": "",
|
||||
"load_format": "dummy",
|
||||
"max-model-len": 2048,
|
||||
"max-num-seqs": 256,
|
||||
"async-scheduling": ""
|
||||
},
|
||||
"client_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
|
||||
"backend": "vllm",
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "serving_mixtral8x7B_tp2_sharegpt",
|
||||
"qps_list": [1, 4, 16, "inf"],
|
||||
"server_environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"server_parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"tensor_parallel_size": 2,
|
||||
"swap_space": 16,
|
||||
"disable_log_stats": "",
|
||||
"load_format": "dummy",
|
||||
"max-model-len": 2048,
|
||||
"max-num-seqs": 256,
|
||||
"async-scheduling": ""
|
||||
},
|
||||
"client_parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"backend": "vllm",
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200
|
||||
}
|
||||
}
|
||||
]
|
||||
@ -0,0 +1,61 @@
|
||||
[
|
||||
{
|
||||
"test_name": "throughput_llama8B_tp1",
|
||||
"environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
|
||||
"tensor_parallel_size": 1,
|
||||
"load_format": "dummy",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 1000,
|
||||
"backend": "vllm",
|
||||
"max-model-len": 2048,
|
||||
"max-num-seqs": 512,
|
||||
"async-scheduling": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "throughput_llama70B_tp4",
|
||||
"environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
|
||||
"tensor_parallel_size": 4,
|
||||
"load_format": "dummy",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 1000,
|
||||
"backend": "vllm",
|
||||
"max-model-len": 2048,
|
||||
"max-num-seqs": 512,
|
||||
"async-scheduling": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "throughput_mixtral8x7B_tp2",
|
||||
"environment_variables": {
|
||||
"PT_HPU_LAZY_MODE": 1,
|
||||
"PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
|
||||
"VLLM_CONTIGUOUS_PA": 1,
|
||||
"VLLM_DEFRAG": 1
|
||||
},
|
||||
"parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"tensor_parallel_size": 2,
|
||||
"load_format": "dummy",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 1000,
|
||||
"backend": "vllm",
|
||||
"max-model-len": 2048,
|
||||
"max-num-seqs": 512,
|
||||
"async-scheduling": ""
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -116,24 +116,6 @@ steps:
    commands:
      - "bash .buildkite/scripts/annotate-release.sh"

  - label: "Build and publish TPU release image"
    depends_on: ~
    if: build.env("NIGHTLY") == "1"
    agents:
      queue: tpu_queue_postmerge
    commands:
      - "yes | docker system prune -a"
      - "git fetch --all"
      - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --tag vllm/vllm-tpu:nightly --tag vllm/vllm-tpu:$BUILDKITE_COMMIT --progress plain -f docker/Dockerfile.tpu ."
      - "docker push vllm/vllm-tpu:nightly"
      - "docker push vllm/vllm-tpu:$BUILDKITE_COMMIT"
    plugins:
      - docker-login#v3.0.0:
          username: vllmbot
          password-env: DOCKERHUB_TOKEN
    env:
      DOCKER_BUILDKIT: "1"

  - input: "Provide Release version here"
    id: input-release-version
    fields:
@@ -2,16 +2,23 @@

set -ex

# Get release version and strip leading 'v' if present
RELEASE_VERSION=$(buildkite-agent meta-data get release-version | sed 's/^v//')

if [ -z "$RELEASE_VERSION" ]; then
  echo "Error: RELEASE_VERSION is empty. 'release-version' metadata might not be set or is invalid."
  exit 1
# Get release version, default to 1.0.0.dev for nightly/per-commit builds
RELEASE_VERSION=$(buildkite-agent meta-data get release-version 2>/dev/null | sed 's/^v//')
if [ -z "${RELEASE_VERSION}" ]; then
  RELEASE_VERSION="1.0.0.dev"
fi

buildkite-agent annotate --style 'info' --context 'release-workflow' << EOF
To download the wheel:
To download the wheel (by commit):
\`\`\`
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux1_x86_64.whl .
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux2014_aarch64.whl .

aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux1_x86_64.whl .
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux1_x86_64.whl .
\`\`\`

To download the wheel (by version):
\`\`\`
aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux1_x86_64.whl .
aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux2014_aarch64.whl .
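To make the wheel-naming scheme in the annotation above easier to scan, here is a small illustration that is not part of the diff; the helper name is hypothetical, and the CUDA-tagged variants such as `+cu129` follow the same pattern with a local version suffix.

```python
def wheel_s3_keys(release_version: str, commit: str) -> list[str]:
    """Hypothetical helper mirroring the wheel paths printed by annotate-release.sh."""
    names = [
        f"vllm-{release_version}-cp38-abi3-manylinux1_x86_64.whl",
        f"vllm-{release_version}-cp38-abi3-manylinux2014_aarch64.whl",
    ]
    # Wheels are published both under the commit hash and under the release version.
    return [
        f"s3://vllm-wheels/{prefix}/{name}"
        for prefix in (commit, release_version)
        for name in names
    ]

# Example: the nightly default version used when no release-version metadata is set.
for key in wheel_s3_keys("1.0.0.dev", "0123abcd"):
    print(key)
```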
@@ -173,6 +173,14 @@ fi
PARALLEL_JOB_COUNT=8
MYPYTHONPATH=".."

# Test that we're launching on the machine that has
# proper access to GPUs
render_gid=$(getent group render | cut -d: -f3)
if [[ -z "$render_gid" ]]; then
  echo "Error: 'render' group not found. This is required for GPU access." >&2
  exit 1
fi

# check if the command contains shard flag, we will run all shards in parallel because the host have 8 GPUs.
if [[ $commands == *"--shard-id="* ]]; then
  # assign job count as the number of shards used
@@ -186,6 +194,7 @@ if [[ $commands == *"--shard-id="* ]]; then
      --device /dev/kfd $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES \
      --network=host \
      --shm-size=16gb \
      --group-add "$render_gid" \
      --rm \
      -e HIP_VISIBLE_DEVICES="${GPU}" \
      -e HF_TOKEN \
@@ -217,8 +226,8 @@ else
    --device /dev/kfd $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES \
    --network=host \
    --shm-size=16gb \
    --group-add "$render_gid" \
    --rm \
    -e HIP_VISIBLE_DEVICES=0 \
    -e HF_TOKEN \
    -e AWS_ACCESS_KEY_ID \
    -e AWS_SECRET_ACCESS_KEY \
@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euxo pipefail
|
||||
|
||||
# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT]
|
||||
THRESHOLD=${1:-0.25}
|
||||
NUM_Q=${2:-1319}
|
||||
PORT=${3:-8010}
|
||||
OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
|
||||
mkdir -p "${OUT_DIR}"
|
||||
|
||||
wait_for_server() {
|
||||
local port=$1
|
||||
timeout 600 bash -c '
|
||||
until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
|
||||
sleep 1
|
||||
done'
|
||||
}
|
||||
|
||||
MODEL="deepseek-ai/DeepSeek-V2-lite"
|
||||
BACKENDS=("deepep_high_throughput" "deepep_low_latency")
|
||||
|
||||
cleanup() {
|
||||
if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
|
||||
kill "${SERVER_PID}" 2>/dev/null || true
|
||||
for _ in {1..20}; do
|
||||
kill -0 "${SERVER_PID}" 2>/dev/null || break
|
||||
sleep 0.5
|
||||
done
|
||||
kill -9 "${SERVER_PID}" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
for BACK in "${BACKENDS[@]}"; do
|
||||
VLLM_DEEP_GEMM_WARMUP=skip \
|
||||
VLLM_ALL2ALL_BACKEND=$BACK \
|
||||
vllm serve "$MODEL" \
|
||||
--enforce-eager \
|
||||
--tensor-parallel-size 2 \
|
||||
--data-parallel-size 2 \
|
||||
--enable-expert-parallel \
|
||||
--enable-eplb \
|
||||
--trust-remote-code \
|
||||
--max-model-len 2048 \
|
||||
--port $PORT &
|
||||
SERVER_PID=$!
|
||||
wait_for_server $PORT
|
||||
|
||||
TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
|
||||
OUT="${OUT_DIR}/${TAG}_${BACK}.json"
|
||||
python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
|
||||
python3 - <<PY
|
||||
import json; acc=json.load(open('${OUT}'))['accuracy']
|
||||
print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
|
||||
assert acc >= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}"
|
||||
PY
|
||||
|
||||
cleanup
|
||||
SERVER_PID=
|
||||
sleep 1
|
||||
PORT=$((PORT+1))
|
||||
done
|
||||
@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euxo pipefail
|
||||
|
||||
# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT]
|
||||
THRESHOLD=${1:-0.8}
|
||||
NUM_Q=${2:-1319}
|
||||
PORT=${3:-8020}
|
||||
OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
|
||||
mkdir -p "${OUT_DIR}"
|
||||
|
||||
wait_for_server() {
|
||||
local port=$1
|
||||
timeout 600 bash -c '
|
||||
until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
|
||||
sleep 1
|
||||
done'
|
||||
}
|
||||
|
||||
MODEL="QWen/Qwen3-30B-A3B-FP8"
|
||||
BACKENDS=("deepep_high_throughput" "deepep_low_latency")
|
||||
|
||||
cleanup() {
|
||||
if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
|
||||
kill "${SERVER_PID}" 2>/dev/null || true
|
||||
for _ in {1..20}; do
|
||||
kill -0 "${SERVER_PID}" 2>/dev/null || break
|
||||
sleep 0.5
|
||||
done
|
||||
kill -9 "${SERVER_PID}" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
for BACK in "${BACKENDS[@]}"; do
|
||||
VLLM_DEEP_GEMM_WARMUP=skip \
|
||||
VLLM_ALL2ALL_BACKEND=$BACK \
|
||||
vllm serve "$MODEL" \
|
||||
--enforce-eager \
|
||||
--tensor-parallel-size 2 \
|
||||
--data-parallel-size 2 \
|
||||
--enable-expert-parallel \
|
||||
--trust-remote-code \
|
||||
--max-model-len 2048 \
|
||||
--port $PORT &
|
||||
SERVER_PID=$!
|
||||
wait_for_server $PORT
|
||||
|
||||
TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
|
||||
OUT="${OUT_DIR}/${TAG}_${BACK}.json"
|
||||
python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
|
||||
python3 - <<PY
|
||||
import json; acc=json.load(open('${OUT}'))['accuracy']
|
||||
print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
|
||||
assert acc >= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}"
|
||||
PY
|
||||
|
||||
cleanup
|
||||
SERVER_PID=
|
||||
sleep 1
|
||||
PORT=$((PORT+1))
|
||||
done
|
||||
@ -48,8 +48,8 @@ steps:
|
||||
commands:
|
||||
- bash standalone_tests/pytorch_nightly_dependency.sh
|
||||
|
||||
- label: Async Engine, Inputs, Utils, Worker Test # 36min
|
||||
timeout_in_minutes: 50
|
||||
- label: Async Engine, Inputs, Utils, Worker Test # 10min
|
||||
timeout_in_minutes: 15
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
# grade: Blocking
|
||||
@ -344,7 +344,7 @@ steps:
|
||||
- pytest -v -s v1/logits_processors
|
||||
- pytest -v -s v1/worker
|
||||
- pytest -v -s v1/spec_decode
|
||||
- pytest -v -s -m 'not cpu_test' v1/kv_connector/unit
|
||||
- pytest -v -s -m 'not cpu_test' v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_lmcache_integration.py
|
||||
- pytest -v -s -m 'not cpu_test' v1/metrics
|
||||
- pytest -v -s v1/test_oracle.py
|
||||
- pytest -v -s v1/test_request.py
|
||||
@ -441,7 +441,7 @@ steps:
|
||||
--ignore=lora/test_llm_with_multi_loras.py \
|
||||
--ignore=lora/test_olmoe_tp.py \
|
||||
--ignore=lora/test_deepseekv2_tp.py \
|
||||
--ignore=lora/test_gptoss.py \
|
||||
--ignore=lora/test_gptoss_tp.py \
|
||||
--ignore=lora/test_qwen3moe_tp.py
|
||||
parallelism: 4
|
||||
|
||||
@ -616,9 +616,9 @@ steps:
|
||||
- uv pip install --system torchao==0.13.0
|
||||
- VLLM_TEST_FORCE_LOAD_FORMAT=auto pytest -v -s quantization/ --ignore quantization/test_blackwell_moe.py
|
||||
|
||||
- label: LM Eval Small Models # 53min
|
||||
timeout_in_minutes: 75
|
||||
mirror_hardwares: [amdexperimental]
|
||||
- label: LM Eval Small Models # 15min
|
||||
timeout_in_minutes: 20
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
# grade: Blocking
|
||||
source_file_dependencies:
|
||||
@ -627,17 +627,18 @@ steps:
|
||||
commands:
|
||||
- pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt --tp-size=1
|
||||
|
||||
- label: OpenAI API correctness # 22min
|
||||
timeout_in_minutes: 30
|
||||
mirror_hardwares: [amdexperimental]
|
||||
- label: OpenAI API correctness # 10min
|
||||
timeout_in_minutes: 15
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
# grade: Blocking
|
||||
source_file_dependencies:
|
||||
- csrc/
|
||||
- vllm/entrypoints/openai/
|
||||
- vllm/model_executor/models/whisper.py
|
||||
commands: # LMEval+Transcription WER check
|
||||
- pytest -s entrypoints/openai/correctness/
|
||||
commands: # LMEval
|
||||
# Transcription WER check is skipped because encoder-decoder models are not supported on ROCm, see https://github.com/vllm-project/vllm/issues/27442
|
||||
- pytest -s entrypoints/openai/correctness/ --ignore entrypoints/openai/correctness/test_transcription_api_correctness.py
|
||||
|
||||
- label: OpenAI-Compatible Tool Use # 23 min
|
||||
timeout_in_minutes: 35
|
||||
@ -858,10 +859,10 @@ steps:
|
||||
- pytest -v -s models/multimodal -m core_model --ignore models/multimodal/generation/test_whisper.py --ignore models/multimodal/processing
|
||||
- cd .. && VLLM_WORKER_MULTIPROC_METHOD=spawn pytest -v -s tests/models/multimodal/generation/test_whisper.py -m core_model # Otherwise, mp_method="spawn" doesn't work
|
||||
|
||||
- label: Multi-Modal Accuracy Eval (Small Models) # 50min
|
||||
mirror_hardwares: [amdexperimental]
|
||||
- label: Multi-Modal Accuracy Eval (Small Models) # 10min
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
timeout_in_minutes: 70
|
||||
timeout_in_minutes: 15
|
||||
working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
|
||||
source_file_dependencies:
|
||||
- vllm/multimodal/
|
||||
@ -1217,6 +1218,8 @@ steps:
|
||||
- pytest -v -s -x lora/test_llama_tp.py
|
||||
- pytest -v -s -x lora/test_llm_with_multi_loras.py
|
||||
- pytest -v -s -x lora/test_olmoe_tp.py
|
||||
- pytest -v -s -x lora/test_gptoss_tp.py
|
||||
|
||||
|
||||
- label: Weight Loading Multiple GPU Test # 33min
|
||||
timeout_in_minutes: 45
|
||||
|
||||
@ -232,8 +232,8 @@ steps:
|
||||
commands:
|
||||
- pytest -v -s distributed/test_eplb_algo.py
|
||||
|
||||
- label: EPLB Execution Test # 5min
|
||||
timeout_in_minutes: 15
|
||||
- label: EPLB Execution Test # 10min
|
||||
timeout_in_minutes: 20
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
num_gpus: 4
|
||||
source_file_dependencies:
|
||||
@ -241,6 +241,7 @@ steps:
|
||||
- tests/distributed/test_eplb_execute.py
|
||||
commands:
|
||||
- pytest -v -s distributed/test_eplb_execute.py
|
||||
- pytest -v -s distributed/test_eplb_spec_decode.py
|
||||
|
||||
- label: Metrics, Tracing Test # 12min
|
||||
timeout_in_minutes: 20
|
||||
@ -315,6 +316,7 @@ steps:
|
||||
- vllm/
|
||||
- tests/v1
|
||||
commands:
|
||||
- uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
|
||||
# split the test to avoid interference
|
||||
- pytest -v -s -m 'not cpu_test' v1/core
|
||||
- pytest -v -s v1/executor
|
||||
@ -340,6 +342,15 @@ steps:
|
||||
commands:
|
||||
- pytest -v -s v1/attention
|
||||
|
||||
- label: V1 Test attention (B200) # 10min
|
||||
timeout_in_minutes: 30
|
||||
gpu: b200
|
||||
source_file_dependencies:
|
||||
- vllm/v1/attention
|
||||
- tests/v1/attention
|
||||
commands:
|
||||
- VLLM_DISABLE_FLASHINFER_PREFILL=1 pytest -v -s v1/attention # TODO: FI prefill is bugged and causes incorrectness, fix this
|
||||
|
||||
- label: V1 Test others (CPU) # 5 mins
|
||||
source_file_dependencies:
|
||||
- vllm/
|
||||
@ -417,7 +428,7 @@ steps:
|
||||
--ignore=lora/test_llm_with_multi_loras.py \
|
||||
--ignore=lora/test_olmoe_tp.py \
|
||||
--ignore=lora/test_deepseekv2_tp.py \
|
||||
--ignore=lora/test_gptoss.py \
|
||||
--ignore=lora/test_gptoss_tp.py \
|
||||
--ignore=lora/test_qwen3moe_tp.py
|
||||
|
||||
parallelism: 4
|
||||
@ -449,6 +460,7 @@ steps:
|
||||
- tests/compile
|
||||
commands:
|
||||
- pytest -v -s compile/test_basic_correctness.py
|
||||
- pytest -v -s compile/test_multimodal_compile.py
|
||||
- pytest -v -s compile/piecewise/
|
||||
|
||||
- label: PyTorch Fullgraph Test # 22min
|
||||
@ -460,7 +472,9 @@ steps:
|
||||
- tests/compile
|
||||
commands:
|
||||
- pytest -v -s compile/test_full_graph.py
|
||||
- pytest -v -s compile/test_fusions_e2e.py
|
||||
# Limit to no custom ops to reduce running time
|
||||
# Wrap with quotes to escape yaml and avoid starting -k string with a -
|
||||
- "pytest -v -s compile/test_fusions_e2e.py -k 'TRITON and -quant_fp8'"
|
||||
|
||||
- label: Cudagraph test
|
||||
timeout_in_minutes: 20
|
||||
@ -534,8 +548,11 @@ steps:
|
||||
|
||||
- label: Model Executor Test # 23min
|
||||
timeout_in_minutes: 35
|
||||
torch_nightly: true
|
||||
mirror_hardwares: [amdexperimental]
|
||||
source_file_dependencies:
|
||||
- vllm/engine/arg_utils.py
|
||||
- vllm/config/model.py
|
||||
- vllm/model_executor
|
||||
- tests/model_executor
|
||||
- tests/entrypoints/openai/test_tensorizer_entrypoint.py
|
||||
@ -914,6 +931,29 @@ steps:
|
||||
- pytest -v -s tests/compile/test_silu_mul_quant_fusion.py
|
||||
# this runner has 2 GPUs available even though num_gpus=2 is not set
|
||||
- pytest -v -s tests/compile/test_fusion_all_reduce.py
|
||||
# Limit to Inductor partition, no custom ops, and allreduce & attn fusion to reduce running time
|
||||
# Wrap with quotes to escape yaml
|
||||
- "pytest -v -s tests/compile/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm -k 'True and Llama-3.1 and -quant_fp8 and -rms_norm'"
|
||||
|
||||
- label: Blackwell Fusion E2E Tests # 30 min
|
||||
timeout_in_minutes: 40
|
||||
working_dir: "/vllm-workspace/"
|
||||
gpu: b200
|
||||
optional: true
|
||||
num_gpus: 2
|
||||
source_file_dependencies:
|
||||
- csrc/quantization/fp4/
|
||||
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
|
||||
- vllm/v1/attention/backends/flashinfer.py
|
||||
- vllm/compilation/
|
||||
# can affect pattern matching
|
||||
- vllm/model_executor/layers/layernorm.py
|
||||
- vllm/model_executor/layers/activation.py
|
||||
- vllm/model_executor/layers/quantization/input_quant_fp8.py
|
||||
- tests/compile/test_fusions_e2e.py
|
||||
commands:
|
||||
- nvidia-smi
|
||||
# Run all e2e fusion tests
|
||||
- pytest -v -s tests/compile/test_fusions_e2e.py
|
||||
|
||||
- label: Blackwell GPT-OSS Eval
|
||||
@ -1119,6 +1159,7 @@ steps:
|
||||
- pytest -v -s -x lora/test_llama_tp.py
|
||||
- pytest -v -s -x lora/test_llm_with_multi_loras.py
|
||||
- pytest -v -s -x lora/test_olmoe_tp.py
|
||||
- pytest -v -s -x lora/test_gptoss_tp.py
|
||||
|
||||
|
||||
- label: Weight Loading Multiple GPU Test # 33min
|
||||
@ -1212,6 +1253,7 @@ steps:
|
||||
- pytest -v -s tests/compile/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm
|
||||
- pytest -v -s tests/distributed/test_context_parallel.py
|
||||
- CUDA_VISIBLE_DEVICES=1,2 VLLM_ALL2ALL_BACKEND=deepep_high_throughput VLLM_USE_DEEP_GEMM=1 VLLM_LOGGING_LEVEL=DEBUG python3 examples/offline_inference/data_parallel.py --model Qwen/Qwen1.5-MoE-A2.7B --tp-size=1 --dp-size=2 --max-model-len 2048
|
||||
- pytest -v -s tests/v1/distributed/test_dbo.py
|
||||
|
||||
##### B200 test #####
|
||||
- label: Distributed Tests (B200) # optional
|
||||
@ -1222,6 +1264,7 @@ steps:
|
||||
commands:
|
||||
- pytest -v -s tests/distributed/test_context_parallel.py
|
||||
- pytest -v -s tests/distributed/test_nccl_symm_mem_allreduce.py
|
||||
- pytest -v -s tests/v1/distributed/test_dbo.py
|
||||
|
||||
##### RL Integration Tests #####
|
||||
- label: Prime-RL Integration Test # 15min
|
||||
@ -1234,3 +1277,21 @@ steps:
|
||||
- .buildkite/scripts/run-prime-rl-test.sh
|
||||
commands:
|
||||
- bash .buildkite/scripts/run-prime-rl-test.sh
|
||||
|
||||
- label: DeepSeek V2-Lite Accuracy
|
||||
timeout_in_minutes: 60
|
||||
gpu: h100
|
||||
optional: true
|
||||
num_gpus: 4
|
||||
working_dir: "/vllm-workspace"
|
||||
commands:
|
||||
- bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh 0.25 200 8010
|
||||
|
||||
- label: Qwen3-30B-A3B-FP8-block Accuracy
|
||||
timeout_in_minutes: 60
|
||||
gpu: h100
|
||||
optional: true
|
||||
num_gpus: 4
|
||||
working_dir: "/vllm-workspace"
|
||||
commands:
|
||||
- bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep.sh 0.8 200 8020
|
||||
|
||||
.github/CODEOWNERS (vendored, 27 changed lines)
@ -9,7 +9,7 @@
|
||||
/vllm/model_executor/layers/quantization @mgoin @robertgshaw2-redhat @tlrmchlsmth @yewentao256 @pavanimajety
|
||||
/vllm/model_executor/layers/mamba @tdoublep
|
||||
/vllm/model_executor/model_loader @22quinn
|
||||
/vllm/multimodal @DarkLight1337 @ywang96 @NickLucche
|
||||
/vllm/multimodal @DarkLight1337 @ywang96 @NickLucche @tjtanaa
|
||||
/vllm/vllm_flash_attn @LucasWilkinson
|
||||
/vllm/lora @jeejeelee
|
||||
/vllm/reasoning @aarnphm @chaunceyjiang
|
||||
@ -105,11 +105,21 @@ mkdocs.yaml @hmellor
|
||||
/vllm/attention/ops/triton_unified_attention.py @tdoublep
|
||||
|
||||
# ROCm related: specify owner with write access to notify AMD folks for careful code review
|
||||
/docker/Dockerfile.rocm* @gshtras
|
||||
/vllm/v1/attention/backends/rocm*.py @gshtras
|
||||
/vllm/v1/attention/backends/mla/rocm*.py @gshtras
|
||||
/vllm/attention/ops/rocm*.py @gshtras
|
||||
/vllm/model_executor/layers/fused_moe/rocm*.py @gshtras
|
||||
/vllm/**/*rocm* @tjtanaa
|
||||
/docker/Dockerfile.rocm* @gshtras @tjtanaa
|
||||
/vllm/v1/attention/backends/rocm*.py @gshtras @tjtanaa
|
||||
/vllm/v1/attention/backends/mla/rocm*.py @gshtras @tjtanaa
|
||||
/vllm/attention/ops/rocm*.py @gshtras @tjtanaa
|
||||
/vllm/model_executor/layers/fused_moe/rocm*.py @gshtras @tjtanaa
|
||||
/csrc/rocm @gshtras @tjtanaa
|
||||
/requirements/*rocm* @tjtanaa
|
||||
/tests/**/*rocm* @tjtanaa
|
||||
/docs/**/*rocm* @tjtanaa
|
||||
/vllm/**/*quark* @tjtanaa
|
||||
/tests/**/*quark* @tjtanaa
|
||||
/docs/**/*quark* @tjtanaa
|
||||
/vllm/**/*aiter* @tjtanaa
|
||||
/tests/**/*aiter* @tjtanaa
|
||||
|
||||
# TPU
|
||||
/vllm/v1/worker/tpu* @NickLucche
|
||||
@ -127,3 +137,8 @@ mkdocs.yaml @hmellor
|
||||
/vllm/config/pooler.py @noooop
|
||||
/vllm/pooling_params.py @noooop
|
||||
/vllm/model_executor/layers/pooler.py @noooop
|
||||
|
||||
# Security guide and policies
|
||||
/docs/usage/security.md @russellb
|
||||
/SECURITY.md @russellb
|
||||
/docs/contributing/vulnerability_management.md @russellb
|
||||
|
||||
.gitignore (vendored, 3 changed lines)
@ -221,3 +221,6 @@ csrc/moe/marlin_moe_wna16/kernel_*
|
||||
|
||||
# Ignore ep_kernels_workspace folder
|
||||
ep_kernels_workspace/
|
||||
|
||||
# Allow tracked library source folders under submodules (e.g., benchmarks/lib)
|
||||
!vllm/benchmarks/lib/
|
||||
|
||||
@@ -38,7 +38,7 @@ repos:
    rev: 0.9.1
    hooks:
      - id: pip-compile
        args: [requirements/test.in, -o, requirements/test.txt, --index-strategy, unsafe-best-match, --torch-backend, cu129, --python-platform, x86_64-manylinux_2_28]
        args: [requirements/test.in, -o, requirements/test.txt, --index-strategy, unsafe-best-match, --torch-backend, cu129, --python-platform, x86_64-manylinux_2_28, --python-version, "3.12"]
        files: ^requirements/test\.(in|txt)$
  - repo: local
    hooks:
@@ -241,7 +241,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
  message(STATUS "Enabling cumem allocator extension.")
  # link against cuda driver library
  list(APPEND CUMEM_LIBS CUDA::cuda_driver)
  define_gpu_extension_target(
  define_extension_target(
    cumem_allocator
    DESTINATION vllm
    LANGUAGE CXX
@@ -858,7 +858,7 @@ if (VLLM_GPU_LANG STREQUAL "HIP")
  endif()

  message(STATUS "Enabling C extension.")
  define_gpu_extension_target(
  define_extension_target(
    _C
    DESTINATION vllm
    LANGUAGE ${VLLM_GPU_LANG}
@@ -973,7 +973,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
  endif()

  message(STATUS "Enabling moe extension.")
  define_gpu_extension_target(
  define_extension_target(
    _moe_C
    DESTINATION vllm
    LANGUAGE ${VLLM_GPU_LANG}
@@ -994,7 +994,7 @@ if(VLLM_GPU_LANG STREQUAL "HIP")
    "csrc/rocm/skinny_gemms.cu"
    "csrc/rocm/attention.cu")

  define_gpu_extension_target(
  define_extension_target(
    _rocm_C
    DESTINATION vllm
    LANGUAGE ${VLLM_GPU_LANG}
@@ -21,6 +21,7 @@ Join us at the [PyTorch Conference, October 22-23](https://events.linuxfoundatio

*Latest News* 🔥

- [2025/11] We hosted [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/xSrYXjNgr1HbCP4ExYNG1w) focusing on distributed inference and diverse accelerator support with vLLM! Please find the meetup slides [here](https://drive.google.com/drive/folders/1nQJ8ZkLSjKxvu36sSHaceVXtttbLvvu-?usp=drive_link).
- [2025/10] We hosted [vLLM Shanghai Meetup](https://mp.weixin.qq.com/s/__xb4OyOsImz-9eAVrdlcg) focused on hands-on vLLM inference optimization! Please find the meetup slides [here](https://drive.google.com/drive/folders/1KqwjsFJLfEsC8wlDugnrR61zsWHt94Q6).
- [2025/09] We hosted [vLLM Toronto Meetup](https://luma.com/e80e0ymm) focused on tackling inference at scale and speculative decoding with speakers from NVIDIA and Red Hat! Please find the meetup slides [here](https://docs.google.com/presentation/d/1IYJYmJcu9fLpID5N5RbW_vO0XLo0CGOR14IXOjB61V8/edit?usp=sharing).
- [2025/08] We hosted [vLLM Shenzhen Meetup](https://mp.weixin.qq.com/s/k8ZBO1u2_2odgiKWH_GVTQ) focusing on the ecosystem around vLLM! Please find the meetup slides [here](https://drive.google.com/drive/folders/1Ua2SVKVSu-wp5vou_6ElraDt2bnKhiEA).
@@ -83,7 +84,7 @@ vLLM is flexible and easy to use with:
- Tensor, pipeline, data and expert parallelism support for distributed inference
- Streaming outputs
- OpenAI-compatible API server
- Support for NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, and TPU. Additionally, support for diverse hardware plugins such as Intel Gaudi, IBM Spyre and Huawei Ascend.
- Support for NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, Arm CPUs, and TPU. Additionally, support for diverse hardware plugins such as Intel Gaudi, IBM Spyre and Huawei Ascend.
- Prefix caching support
- Multi-LoRA support
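The feature list above mentions offline inference and the OpenAI-compatible server. As a quick illustration that is not part of this diff, a minimal offline-inference sketch against vLLM's public `LLM` API; the model choice is arbitrary and the sampling settings are assumptions.

```python
from vllm import LLM, SamplingParams

# Minimal offline-inference sketch; model and parameters chosen only for illustration.
llm = LLM(model="meta-llama/Meta-Llama-3.1-8B-Instruct")
params = SamplingParams(temperature=0.8, max_tokens=32)
outputs = llm.generate(["What is vLLM?"], params)
print(outputs[0].outputs[0].text)
```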
@@ -16,8 +16,8 @@ from vllm.model_executor.layers.fused_moe.fused_moe import (
from vllm.utils.argparse_utils import FlexibleArgumentParser

DEFAULT_MODELS = [
    "nm-testing/Mixtral-8x7B-Instruct-v0.1",
    "nm-testing/deepseekv2-lite",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "deepseek-ai/DeepSeek-V2-Lite",
    "ibm-granite/granite-3.0-1b-a400m",
    "ibm-granite/granite-3.0-3b-a800m",
]
@ -19,13 +19,24 @@ from torch.utils.benchmark import Measurement as TMeasurement
|
||||
from utils import ArgPool, Bench, CudaGraphBenchParams
|
||||
from weight_shapes import WEIGHT_SHAPES
|
||||
|
||||
from vllm.triton_utils import HAS_TRITON
|
||||
from vllm.lora.ops.triton_ops.utils import get_lora_op_configs
|
||||
from vllm.triton_utils import HAS_TRITON, triton
|
||||
|
||||
if HAS_TRITON:
|
||||
from vllm.lora.ops.triton_ops import LoRAKernelMeta, lora_expand, lora_shrink
|
||||
from vllm.lora.ops.triton_ops import ( ## added fused_moe_lora
|
||||
LoRAKernelMeta,
|
||||
fused_moe_lora_expand,
|
||||
fused_moe_lora_shrink,
|
||||
lora_expand,
|
||||
lora_shrink,
|
||||
)
|
||||
from vllm.lora.ops.triton_ops.fused_moe_lora_op import (
|
||||
_LORA_PTR_DICT, ## added _LORA_PTR_DICT for fused_moe_lora
|
||||
)
|
||||
from vllm.lora.ops.triton_ops.utils import _LORA_A_PTR_DICT, _LORA_B_PTR_DICT
|
||||
|
||||
from vllm import _custom_ops as ops
|
||||
from vllm.utils.argparse_utils import FlexibleArgumentParser
|
||||
from vllm.utils.math_utils import round_up
|
||||
|
||||
DEFAULT_MODELS = list(WEIGHT_SHAPES.keys())
|
||||
DEFAULT_TP_SIZES = [1]
|
||||
@ -59,6 +70,8 @@ DEFAULT_NUM_LORAS = [1, 2, 3, 4]
|
||||
DEFAULT_SORT_BY_LORA_IDS = [False, True]
|
||||
DEFAULT_SEQ_LENGTHS = [1]
|
||||
DEFAULT_EXPAND_FN_ADD_INPUTS = [True, False]
|
||||
DEFAULT_TOP_K_NUMS = [1] # Added for MoE LoRA top_k
|
||||
DEFAULT_NUM_EXPERTS = [8] # Added for MoE LoRA num_experts
|
||||
|
||||
|
||||
# Utilities
|
||||
@ -191,6 +204,11 @@ class OpType(Enum):
|
||||
|
||||
LORA_SHRINK = auto()
|
||||
LORA_EXPAND = auto()
|
||||
## Adding support for fused moe lora
|
||||
FUSED_MOE_LORA_GATE_UP_SHRINK = auto() ## Gate/Up projection variant with shrink
|
||||
FUSED_MOE_LORA_GATE_UP_EXPAND = auto() ## Gate/Up projection variant with expand
|
||||
FUSED_MOE_LORA_DOWN_SHRINK = auto() ## Down projection variant with shrink
|
||||
FUSED_MOE_LORA_DOWN_EXPAND = auto() ## Down projection variant with expand
|
||||
|
||||
@staticmethod
|
||||
def from_str(s: str) -> "OpType":
|
||||
@ -198,6 +216,15 @@ class OpType(Enum):
|
||||
return OpType.LORA_SHRINK
|
||||
if s.lower() == "lora_expand":
|
||||
return OpType.LORA_EXPAND
|
||||
# Adding support for fused moe lora, both in gate_up and down
|
||||
if s.lower() == "fused_moe_lora_gate_up_shrink": ## Gate/Up variant with shrink
|
||||
return OpType.FUSED_MOE_LORA_GATE_UP_SHRINK
|
||||
if s.lower() == "fused_moe_lora_gate_up_expand": ## Gate/Up variant with expand
|
||||
return OpType.FUSED_MOE_LORA_GATE_UP_EXPAND
|
||||
if s.lower() == "fused_moe_lora_down_shrink": ## Down variant with shrink
|
||||
return OpType.FUSED_MOE_LORA_DOWN_SHRINK
|
||||
if s.lower() == "fused_moe_lora_down_expand": ## Down variant with expand
|
||||
return OpType.FUSED_MOE_LORA_DOWN_EXPAND
|
||||
raise ValueError(f"Unrecognized str {s} to convert to OpType")
|
||||
|
||||
def is_shrink_fn(self) -> bool:
|
||||
@ -206,19 +233,56 @@ class OpType(Enum):
|
||||
def is_expand_fn(self) -> bool:
|
||||
return self in [OpType.LORA_EXPAND]
|
||||
|
||||
def is_fused_moe_lora_fn(self) -> bool: ## adding for fused MoE LoRA
|
||||
return self in [
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
|
||||
OpType.FUSED_MOE_LORA_DOWN_SHRINK,
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
|
||||
OpType.FUSED_MOE_LORA_DOWN_EXPAND,
|
||||
]
|
||||
|
||||
def is_fused_moe_lora_gate_up_fn(
|
||||
self,
|
||||
) -> bool: ## adding for fused MoE LoRA Gate/Up
|
||||
return self in [
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
|
||||
]
|
||||
|
||||
def is_fused_moe_lora_down_fn(self) -> bool: ## adding for fused MoE LoRA Down
|
||||
return self in [
|
||||
OpType.FUSED_MOE_LORA_DOWN_SHRINK,
|
||||
OpType.FUSED_MOE_LORA_DOWN_EXPAND,
|
||||
]
|
||||
|
||||
def is_fused_moe_lora_shrink_fn(self) -> bool:
|
||||
return self in [
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
|
||||
OpType.FUSED_MOE_LORA_DOWN_SHRINK,
|
||||
]
|
||||
|
||||
def is_fused_moe_lora_expand_fn(self) -> bool:
|
||||
return self in [
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
|
||||
OpType.FUSED_MOE_LORA_DOWN_EXPAND,
|
||||
]
|
||||
|
||||
def num_slices(self) -> list[int]:
|
||||
if self.is_fused_moe_lora_gate_up_fn():
|
||||
return [2]
|
||||
elif self.is_fused_moe_lora_down_fn():
|
||||
return [1]
|
||||
return [1, 2, 3]
|
||||
|
||||
def mkn(
|
||||
self, batch_size: int, seq_length: int, hidden_size: int, lora_rank: int
|
||||
) -> tuple[int, int, int]:
|
||||
num_tokens = batch_size * seq_length
|
||||
if self.is_shrink_fn():
|
||||
if self.is_shrink_fn() or self.is_fused_moe_lora_fn():
|
||||
m = num_tokens
|
||||
k = hidden_size
|
||||
n = lora_rank
|
||||
else:
|
||||
assert self.is_expand_fn()
|
||||
elif self.is_expand_fn():
|
||||
m = num_tokens
|
||||
k = lora_rank
|
||||
n = hidden_size
|
||||
@ -232,9 +296,36 @@ class OpType(Enum):
|
||||
"""
|
||||
if self.is_shrink_fn():
|
||||
return op_dtype, op_dtype, torch.float32
|
||||
else:
|
||||
assert self.is_expand_fn()
|
||||
elif self.is_expand_fn():
|
||||
return torch.float32, op_dtype, op_dtype
|
||||
else:
|
||||
assert self.is_fused_moe_lora_fn()
|
||||
return op_dtype, op_dtype, op_dtype
|
||||
|
||||
def matmul_shapes_fused_moe_lora(
|
||||
self,
|
||||
m: int,
|
||||
n: int,
|
||||
k: int,
|
||||
num_loras: int,
|
||||
num_slices: int,
|
||||
top_k_num: int,
|
||||
num_experts: int,
|
||||
) -> tuple[tuple[int], tuple[int], tuple[int], tuple[int]]:
|
||||
if self.is_fused_moe_lora_shrink_fn():
|
||||
input_shape = (
|
||||
(m * top_k_num, n)
|
||||
if self in [OpType.FUSED_MOE_LORA_DOWN_SHRINK]
|
||||
else (m, n)
|
||||
)
|
||||
output_shape = (num_slices, m, top_k_num, k)
|
||||
weight_shape = (num_loras, num_experts, k, n)
|
||||
else:
|
||||
assert self.is_fused_moe_lora_expand_fn()
|
||||
input_shape = (num_slices, m, top_k_num, k)
|
||||
output_shape = (m, top_k_num, n * num_slices)
|
||||
weight_shape = (num_loras, num_experts, n, k)
|
||||
return (input_shape, weight_shape, output_shape)
|
||||
|
||||
def matmul_shapes(
|
||||
self,
|
||||
@ -244,6 +335,8 @@ class OpType(Enum):
|
||||
lora_rank: int,
|
||||
num_loras: int,
|
||||
num_slices: int,
|
||||
top_k_num: int | None = None,
|
||||
num_experts: int | None = None,
|
||||
) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
|
||||
"""
|
||||
Given num_slices, return the shapes of the A, B, and C matrices
|
||||
@ -258,6 +351,16 @@ class OpType(Enum):
|
||||
if self in [OpType.LORA_EXPAND]:
|
||||
# LoRA expand kernels support num_slices inherently in the kernel
|
||||
return ((num_slices, m, k), b_shape, (m, n * num_slices))
|
||||
if self.is_fused_moe_lora_fn():
|
||||
return self.matmul_shapes_fused_moe_lora(
|
||||
m,
|
||||
k,
|
||||
n,
|
||||
num_loras,
|
||||
num_slices,
|
||||
top_k_num,
|
||||
num_experts,
|
||||
)
|
||||
raise ValueError(f"Unrecognized op_type {self}")
|
||||
|
||||
def bench_fn(self) -> Callable:
|
||||
@ -265,6 +368,16 @@ class OpType(Enum):
|
||||
return lora_shrink
|
||||
if self == OpType.LORA_EXPAND:
|
||||
return lora_expand
|
||||
if self in [
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
|
||||
OpType.FUSED_MOE_LORA_DOWN_SHRINK,
|
||||
]:
|
||||
return fused_moe_lora_shrink
|
||||
if self in [
|
||||
OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
|
||||
OpType.FUSED_MOE_LORA_DOWN_EXPAND,
|
||||
]:
|
||||
return fused_moe_lora_expand
|
||||
|
||||
raise ValueError(f"Unrecognized optype {self}")
|
||||
|
||||
@ -318,6 +431,8 @@ class BenchmarkContext:
|
||||
sort_by_lora_id: bool
|
||||
dtype: torch.dtype
|
||||
seq_length: int | None = None
|
||||
num_experts: int | None = None # num_experts for MoE based ops
|
||||
top_k_num: int | None = None # top_k for MoE based ops
|
||||
num_slices: int | None = None # num_slices for slice based ops
|
||||
|
||||
def with_seq_length(self, seq_length: int) -> "BenchmarkContext":
|
||||
@ -373,6 +488,11 @@ class BenchmarkTensors:
|
||||
f"{dtype_to_str(self.output.dtype)}"
|
||||
)
|
||||
|
||||
def get_num_tokens(self, size: int, top_k_num: int, op_type: OpType):
|
||||
return (
|
||||
size * top_k_num if op_type in [OpType.FUSED_MOE_LORA_DOWN_SHRINK] else size
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def make(
|
||||
ctx: BenchmarkContext, op_type: OpType, device: str = "cuda"
|
||||
@ -385,6 +505,8 @@ class BenchmarkTensors:
|
||||
ctx.lora_rank,
|
||||
ctx.num_loras,
|
||||
ctx.num_slices,
|
||||
ctx.top_k_num,
|
||||
ctx.num_experts,
|
||||
)
|
||||
a_type, b_type, c_type = op_type.matmul_dtypes(ctx.dtype)
|
||||
input_tensor, lora_weights, output_tensor = make_rand_tensors(
|
||||
@ -432,17 +554,27 @@ class BenchmarkTensors:
|
||||
prompt_lora_indices_tensor,
|
||||
)
|
||||
|
||||
def sanity_check(self) -> None:
|
||||
def sanity_check(self, ctx: BenchmarkContext, op_type: OpType) -> None:
|
||||
"""
|
||||
Fails asserts when non-conformality is detected.
|
||||
"""
|
||||
num_tokens = self.input.shape[-2]
|
||||
num_tokens = (
|
||||
self.input.shape[1]
|
||||
if op_type.is_fused_moe_lora_expand_fn()
|
||||
else self.input.shape[-2]
|
||||
)
|
||||
# check metadata tensors
|
||||
assert torch.sum(self.seq_lens) == num_tokens
|
||||
## In down shrink case, each token is repeated top_k_num times
|
||||
assert num_tokens == self.get_num_tokens(
|
||||
torch.sum(self.seq_lens), ctx.top_k_num, op_type
|
||||
), f"Expected {num_tokens} tokens, but got {torch.sum(self.seq_lens)}"
|
||||
num_seqs = self.seq_lens.shape[0]
|
||||
# assert self.seq_start_loc.shape[0] == num_seqs
|
||||
## In down shrink case, each prompt corresponds to top_k_num sequences
|
||||
assert self.prompt_lora_mapping.shape[0] == num_seqs
|
||||
assert self.lora_kernel_meta.token_lora_mapping.shape[0] == num_tokens
|
||||
assert self.get_num_tokens(
|
||||
self.lora_kernel_meta.token_lora_mapping.shape[0], ctx.top_k_num, op_type
|
||||
)
|
||||
|
||||
def to_device(self, device: str):
|
||||
"""
|
||||
@ -471,21 +603,111 @@ class BenchmarkTensors:
|
||||
to_device(field) if field_name != "no_lora_flag_cpu" else field,
|
||||
)
|
||||
|
||||
def metadata(self) -> tuple[int, int, int]:
|
||||
def metadata(self, ctx: BenchmarkContext, op_type: OpType) -> tuple[int, int, int]:
|
||||
"""
|
||||
Return num_seqs, num_tokens and max_seq_len
|
||||
"""
|
||||
num_seqs = self.seq_lens.shape[0]
|
||||
num_tokens = self.lora_kernel_meta.token_lora_mapping.shape[0]
|
||||
num_tokens = self.get_num_tokens(
|
||||
self.lora_kernel_meta.token_lora_mapping.shape[0], ctx.top_k_num, op_type
|
||||
)
|
||||
max_seq_len = torch.max(self.seq_lens).item()
|
||||
num_slices = len(self.lora_weights_lst)
|
||||
return num_seqs, num_tokens, max_seq_len, num_slices
|
||||
|
||||
def as_lora_shrink_kwargs(self) -> dict[str, Any]:
|
||||
self.sanity_check()
|
||||
def fused_moe_lora_data_prepare(
|
||||
self,
|
||||
block_size: int,
|
||||
token_lora_mapping: torch.Tensor,
|
||||
ctx: BenchmarkContext,
|
||||
):
|
||||
def moe_lora_align_block_size(
|
||||
topk_ids: torch.Tensor,
|
||||
token_lora_mapping: torch.Tensor,
|
||||
block_size: int,
|
||||
num_experts: int,
|
||||
max_loras: int,
|
||||
expert_map: torch.Tensor | None = None,
|
||||
pad_sorted_ids: bool = False,
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Aligns tokens and experts into block-sized chunks for LoRA-based
|
||||
mixture-of-experts (MoE) execution.
|
||||
"""
|
||||
max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
|
||||
if pad_sorted_ids:
|
||||
max_num_tokens_padded = round_up(max_num_tokens_padded, block_size)
|
||||
sorted_ids = torch.empty(
|
||||
(max_loras * max_num_tokens_padded,),
|
||||
dtype=torch.int32,
|
||||
device=topk_ids.device,
|
||||
)
|
||||
max_num_m_blocks = triton.cdiv(max_num_tokens_padded, block_size)
|
||||
# Expert ids must be set default to -1 to prevent a blank block
|
||||
expert_ids = torch.empty(
|
||||
(max_loras * max_num_m_blocks,),
|
||||
dtype=torch.int32,
|
||||
device=topk_ids.device,
|
||||
)
|
||||
num_tokens_post_pad = torch.empty(
|
||||
(max_loras), dtype=torch.int32, device=topk_ids.device
|
||||
)
|
||||
|
||||
ops.moe_lora_align_block_size(
|
||||
topk_ids,
|
||||
token_lora_mapping,
|
||||
num_experts,
|
||||
block_size,
|
||||
max_loras,
|
||||
max_num_tokens_padded,
|
||||
max_num_m_blocks,
|
||||
sorted_ids,
|
||||
expert_ids,
|
||||
num_tokens_post_pad,
|
||||
)
|
||||
if expert_map is not None:
|
||||
expert_ids = expert_map[expert_ids]
|
||||
|
||||
return sorted_ids, expert_ids, num_tokens_post_pad
|
||||
|
||||
num_tokens = ctx.batch_size
|
||||
curr_topk_ids = torch.randint(
|
||||
0,
|
||||
ctx.num_experts,
|
||||
(num_tokens, ctx.top_k_num),
|
||||
device="cuda",
|
||||
dtype=torch.int32,
|
||||
)
|
||||
topk_weights = torch.randint(
|
||||
0,
|
||||
ctx.num_experts,
|
||||
(num_tokens, ctx.top_k_num),
|
||||
device="cuda",
|
||||
dtype=torch.int32,
|
||||
)
|
||||
|
||||
(sorted_token_ids_lora, expert_ids_lora, num_tokens_post_padded_lora) = (
|
||||
moe_lora_align_block_size(
|
||||
topk_ids=curr_topk_ids,
token_lora_mapping=token_lora_mapping,
block_size=block_size,
num_experts=ctx.num_experts,
max_loras=ctx.num_loras,
)
)

sorted_token_ids = sorted_token_ids_lora.view(ctx.num_loras, -1)
expert_ids = expert_ids_lora.view(ctx.num_loras, -1)
num_tokens_post_padded = num_tokens_post_padded_lora
return (topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded)

def as_lora_shrink_kwargs(
self, ctx: BenchmarkContext, op_type: OpType
) -> dict[str, Any]:
self.sanity_check(ctx, op_type)
self.to_device(self.input.device)

_, num_tokens, _, num_slices = self.metadata()
_, num_tokens, _, num_slices = self.metadata(ctx, op_type)

# Sanity check matrix shapes.
i_shape, lw_shape, o_shape = (
@@ -520,11 +742,13 @@ class BenchmarkTensors:
"no_lora_flag_cpu": self.lora_kernel_meta.no_lora_flag_cpu,
}

def as_lora_expand_kwargs(self, add_inputs: bool) -> dict[str, Any]:
self.sanity_check()
def as_lora_expand_kwargs(
self, ctx: BenchmarkContext, op_type: OpType, add_inputs: bool
) -> dict[str, Any]:
self.sanity_check(ctx, op_type)
self.to_device(self.input.device)

_, num_tokens, _, num_slices = self.metadata()
_, num_tokens, _, num_slices = self.metadata(ctx, op_type)

# Sanity check matrix shapes.
i_shape, lw_shape, o_shape = (
@@ -561,18 +785,173 @@ class BenchmarkTensors:
"no_lora_flag_cpu": self.lora_kernel_meta.no_lora_flag_cpu,
}

def bench_fn_kwargs(
self, op_type: OpType, add_inputs: bool | None = None
def as_fused_moe_lora_shrink_kwargs(
self, ctx: BenchmarkContext, op_type: OpType
) -> dict[str, Any]:
if op_type.is_shrink_fn():
self.sanity_check(ctx, op_type)
self.to_device(self.input.device)

_, num_tokens, _, num_slices = self.metadata(ctx, op_type)

# Sanity check matrix shapes.
i_shape, lw_shape, o_shape = (
self.input.shape,
self.lora_weights_lst[0].shape,
self.output.shape,
)
# Expected input shape : [num_tokens, hidden_size] for gate_up
# Expected input shape : [top_k_num * num_tokens, hidden_size] for down
assert len(i_shape) == 2
assert i_shape[0] == num_tokens
hidden_size = i_shape[1]
# Expected lora weight shape [max_lora, num_experts, lora_rank, hidden_size]
assert len(lw_shape) == 4
assert lw_shape[-1] == hidden_size
lora_rank = lw_shape[-2]
# Expected output shape : [num_slices, num_tokens, top_k_num, lora_rank]
assert len(o_shape) == 4
assert (
o_shape
== (num_slices, num_tokens // ctx.top_k_num, ctx.top_k_num, lora_rank)
if op_type in [OpType.FUSED_MOE_LORA_DOWN_SHRINK]
else o_shape == (num_slices, num_tokens, ctx.top_k_num, lora_rank)
)
kernel_config = get_lora_op_configs(
op_type.name.lower(),
max_loras=lw_shape[0],
batch=num_tokens,
hidden_size=hidden_size,
rank=lora_rank,
num_slices=num_slices,
add_inputs=False,
)

(topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded) = (
self.fused_moe_lora_data_prepare(
block_size=kernel_config["BLOCK_SIZE_M"],
token_lora_mapping=self.lora_kernel_meta.token_lora_mapping,
ctx=ctx,
)
)

return {
"qcurr_hidden_states": self.input,
"lora_a_stacked": self.lora_weights_lst,
"a_intermediate_cache1": self.output,
"topk_weights": topk_weights,
"sorted_token_ids": sorted_token_ids,
"expert_ids": expert_ids,
"num_tokens_post_padded": num_tokens_post_padded,
"top_k_num": ctx.top_k_num,
"device": self.input.device,
"N": lora_rank,
"M": topk_weights.shape[0],
"EM": sorted_token_ids.shape[1],
"K": self.input.shape[1],
"num_tokens": num_tokens,
"num_experts": ctx.num_experts,
"num_slices": num_slices,
"shrink_block_size_m": kernel_config["BLOCK_SIZE_M"],
"shrink_block_size_n": kernel_config["BLOCK_SIZE_N"],
"shrink_block_size_k": kernel_config["BLOCK_SIZE_K"],
"shrink_group_size_m": kernel_config["GROUP_SIZE_M"],
"shrink_num_warps": kernel_config["NUM_WARPS"],
"shrink_num_stages": kernel_config["NUM_STAGES"],
"shrink_split_k": kernel_config.get("SPLIT_K", 1),
"mul_routed_weight": op_type.is_fused_moe_lora_down_fn(),
}

def as_fused_moe_lora_expand_kwargs(
self, ctx: BenchmarkContext, op_type: OpType
) -> dict[str, Any]:
self.sanity_check(ctx, op_type)
self.to_device(self.input.device)

_, num_tokens, _, num_slices = self.metadata(ctx, op_type)

# Sanity check matrix shapes.
i_shape, lw_shape, o_shape = (
self.input.shape,
self.lora_weights_lst[0].shape,
self.output.shape,
)

# Expected input shape : [num_slices, num_tokens, top_k_num, lora_rank]
assert len(i_shape) == 4
assert i_shape[0] == num_slices
assert i_shape[1] == num_tokens
lora_rank = i_shape[-1]
# Expected lora weight shape : [num_loras, num_experts, hidden_size, lora_rank]
assert len(lw_shape) == 4
assert lw_shape[-1] == lora_rank
hidden_size = lw_shape[-2]
# Expected output shape : [num_tokens, top_k_num, hidden_size * num_slices]
assert len(o_shape) == 3
assert o_shape == (num_tokens, ctx.top_k_num, hidden_size * num_slices)

kernel_config = get_lora_op_configs(
op_type.name.lower(),
max_loras=lw_shape[0],
batch=num_tokens,
hidden_size=hidden_size,
rank=lora_rank,
num_slices=num_slices,
add_inputs=False,
)

(topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded) = (
self.fused_moe_lora_data_prepare(
block_size=kernel_config["BLOCK_SIZE_M"],
token_lora_mapping=self.lora_kernel_meta.token_lora_mapping,
ctx=ctx,
)
)

return {
"a_intermediate_cache1": self.input,
"lora_b_stacked": self.lora_weights_lst,
"output": self.output,
"topk_weights": topk_weights,
"sorted_token_ids": sorted_token_ids,
"expert_ids": expert_ids,
"num_tokens_post_padded": num_tokens_post_padded,
"top_k_num": ctx.top_k_num,
"device": self.input.device,
"N": lora_rank,
"M": topk_weights.shape[0],
"EM": sorted_token_ids.shape[1],
"K": self.input.shape[1],
"num_tokens": num_tokens,
"num_experts": ctx.num_experts,
"num_slices": num_slices,
"max_lora_rank": lora_rank,
"w1_output_dim_size": lw_shape[2],
"expand_block_size_m": kernel_config["BLOCK_SIZE_M"],
"expand_block_size_n": kernel_config["BLOCK_SIZE_N"],
"expand_block_size_k": kernel_config["BLOCK_SIZE_K"],
"expand_group_size_m": kernel_config["GROUP_SIZE_M"],
"expand_num_warps": kernel_config["NUM_WARPS"],
"expand_num_stages": kernel_config["NUM_STAGES"],
"expand_split_k": kernel_config.get("SPLIT_K", 1),
"mul_routed_weight": op_type.is_fused_moe_lora_down_fn(),
}

def bench_fn_kwargs(
self, ctx: BenchmarkContext, op_type: OpType, add_inputs: bool | None = None
) -> dict[str, Any]:
if op_type.is_shrink_fn() or op_type.is_fused_moe_lora_fn():
assert add_inputs is None
else:
assert add_inputs is not None

if op_type == OpType.LORA_SHRINK:
return self.as_lora_shrink_kwargs()
return self.as_lora_shrink_kwargs(ctx, op_type)
if op_type == OpType.LORA_EXPAND:
return self.as_lora_expand_kwargs(add_inputs)
return self.as_lora_expand_kwargs(ctx, op_type, add_inputs)
if op_type.is_fused_moe_lora_shrink_fn():
return self.as_fused_moe_lora_shrink_kwargs(ctx, op_type)
if op_type.is_fused_moe_lora_expand_fn():
return self.as_fused_moe_lora_expand_kwargs(ctx, op_type)
raise ValueError(f"Unrecognized optype {self}")

def test_correctness(
@@ -617,7 +996,7 @@ def bench_optype(
test_correctness: bool = False,
) -> TMeasurement:
assert arg_pool_size >= 1
if op_type.is_shrink_fn():
if op_type.is_shrink_fn() or op_type.is_fused_moe_lora_fn():
assert expand_fn_add_inputs is None
else:
assert expand_fn_add_inputs is not None
@@ -627,23 +1006,30 @@ def bench_optype(
BenchmarkTensors.make(ctx, op_type) for _ in range(arg_pool_size)
]
for bt in bench_tensors:
bt.sanity_check()
bt.sanity_check(ctx, op_type)

# Test correctness of our implementation.
if test_correctness:
assert op_type in [OpType.LORA_SHRINK, OpType.LORA_EXPAND], (
f"Correctness testing is not supported for {op_type.name}."
)
assert all(
[bt.test_correctness(op_type, expand_fn_add_inputs) for bt in bench_tensors]
[
bt.test_correctness(ctx, op_type, expand_fn_add_inputs)
for bt in bench_tensors
]
)

# BenchmarkTensors -> dict (kwargs)
kwargs_list = [
bt.bench_fn_kwargs(op_type, add_inputs=expand_fn_add_inputs)
bt.bench_fn_kwargs(ctx, op_type, add_inputs=expand_fn_add_inputs)
for bt in bench_tensors
]

# Clear LoRA optimization hash-maps.
_LORA_A_PTR_DICT.clear()
_LORA_B_PTR_DICT.clear()
_LORA_PTR_DICT.clear()
# Run bench function so that _LORA_A_PTR_DICT and _LORA_B_PTR_DICT are set up
for kwargs in kwargs_list:
op_type.bench_fn()(**kwargs)
@@ -793,7 +1179,9 @@ def run(args: argparse.Namespace, bench_ctxs: list[BenchmarkContext]):

# Benchmark bench_op
expand_fn_add_inputs = (
[None] if bench_op.is_shrink_fn() else args.expand_fn_add_inputs
[None]
if bench_op.is_shrink_fn() or bench_op.is_fused_moe_lora_fn()
else args.expand_fn_add_inputs
)
for add_input_arg in expand_fn_add_inputs:
seq_len_timers.append(
@@ -831,12 +1219,22 @@ def as_benchmark_contexts(
hidden_sizes: list[int], lora_ranks: list[int], args: argparse.Namespace
) -> list[BenchmarkContext]:
ctxs: list[BenchmarkContext] = []
for batch_size, hidden_size, lora_rank, num_loras, sort_by_lora_id in product( # noqa
for (
batch_size,
hidden_size,
lora_rank,
num_loras,
sort_by_lora_id,
top_k_num,
num_experts,
) in product( # noqa
args.batch_sizes,
list(hidden_sizes),
lora_ranks,
args.num_loras,
args.sort_by_lora_id,
args.top_k_nums,
args.num_experts,
):
ctxs.append(
BenchmarkContext(
@@ -851,6 +1249,8 @@ def as_benchmark_contexts(
seq_length=None,
sort_by_lora_id=sort_by_lora_id,
dtype=args.dtype,
top_k_num=top_k_num,
num_experts=num_experts,
# To be filled based on the OpType to benchmark
num_slices=None,
)
@@ -1012,6 +1412,22 @@ if __name__ == "__main__":
),
)

p.add_argument(
"--top-k-nums",
nargs="+",
type=int,
default=DEFAULT_TOP_K_NUMS,
help="Top-K values for MoE LoRA operations",
)

p.add_argument(
"--num-experts",
nargs="+",
type=int,
default=DEFAULT_NUM_EXPERTS,
help="Number of experts for MoE LoRA operations",
)

parser = FlexibleArgumentParser(
description=f"""
Benchmark LoRA kernels:
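For reference, the shape contract asserted by the new fused MoE LoRA shrink path can be sketched with plain tensors. This is a minimal illustration only; the sizes below are made up and are not the benchmark's defaults.

```python
import torch

# Hypothetical sizes, chosen only to illustrate the asserted shapes.
num_tokens, hidden_size, lora_rank = 8, 128, 16
max_loras, num_experts, top_k_num, num_slices = 2, 4, 2, 1

# Shrink input: [num_tokens, hidden_size]
x = torch.randn(num_tokens, hidden_size)
# LoRA A stack: [max_loras, num_experts, lora_rank, hidden_size]
lora_a = torch.randn(max_loras, num_experts, lora_rank, hidden_size)
# Shrink output: [num_slices, num_tokens, top_k_num, lora_rank]
out = torch.empty(num_slices, num_tokens, top_k_num, lora_rank)

assert x.shape == (num_tokens, hidden_size)
assert lora_a.shape[-1] == hidden_size and lora_a.shape[-2] == lora_rank
assert out.shape == (num_slices, num_tokens, top_k_num, lora_rank)
```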
@@ -211,7 +211,7 @@ def get_rocm_tuning_space(use_fp16):
num_warps_range = [1, 2, 4, 8]
group_m_range = [1, 4, 8, 16, 32]
num_stage_range = [2]
waves_per_eu_range = [0]
waves_per_eu_range = [0, 1, 2, 4]
matrix_instr_nonkdim_range = [16, 32] if use_fp16 else []
kpack_range = [1, 2] if use_fp16 else []

@@ -590,6 +590,7 @@ def main(args: argparse.Namespace):
"DeepseekV3ForCausalLM",
"DeepseekV32ForCausalLM",
"Glm4MoeForCausalLM",
"NemotronHForCausalLM",
):
E = config.n_routed_experts
topk = config.num_experts_per_tok
@@ -615,6 +616,11 @@ def main(args: argparse.Namespace):
topk = config.moe_topk[0]
intermediate_size = config.moe_intermediate_size[0]
hidden_size = config.hidden_size
elif config.architectures[0] in ["Qwen3OmniMoeForConditionalGeneration"]:
E = config.thinker_config.text_config.num_experts
topk = config.thinker_config.text_config.num_experts_per_tok
intermediate_size = config.thinker_config.text_config.moe_intermediate_size
hidden_size = config.thinker_config.text_config.hidden_size
else:
# Support for llama4
config = config.get_text_config()

@@ -78,11 +78,11 @@ WEIGHT_SHAPES = {
}

WEIGHT_SHAPES_MOE = {
"nm-testing/Mixtral-8x7B-Instruct-v0.1": [
"mistralai/Mixtral-8x7B-Instruct-v0.1": [
[8, 2, 4096, 28672],
[8, 2, 14336, 4096],
],
"nm-testing/deepseekv2-lite": [
"deepseek-ai/DeepSeek-V2-Lite": [
[64, 6, 2048, 1408],
],
"ibm-granite/granite-3.0-1b-a400m": [

@@ -1429,8 +1429,6 @@ async def main() -> None:
random.seed(args.seed)
np.random.seed(args.seed)

if not os.path.exists(args.model):
raise OSError(f"Path does not exist: {args.model}")
logger.info("Loading tokenizer")
tokenizer = AutoTokenizer.from_pretrained(args.model)
@@ -343,7 +343,7 @@ message(STATUS "CPU extension source files: ${VLLM_EXT_SRC}")
# Define extension targets
#

define_gpu_extension_target(
define_extension_target(
_C
DESTINATION vllm
LANGUAGE CXX
@@ -354,4 +354,4 @@ define_gpu_extension_target(
WITH_SOABI
)

message(STATUS "Enabling C extension.")
message(STATUS "Enabling C extension.")

@@ -92,7 +92,7 @@ if(FLASH_MLA_ARCHS)
SRCS "${FlashMLA_Extension_SOURCES}"
CUDA_ARCHS "${FLASH_MLA_ARCHS}")

define_gpu_extension_target(
define_extension_target(
_flashmla_C
DESTINATION vllm
LANGUAGE ${VLLM_GPU_LANG}
@@ -109,7 +109,7 @@ if(FLASH_MLA_ARCHS)
$<$<COMPILE_LANGUAGE:CUDA>:-UPy_LIMITED_API>
$<$<COMPILE_LANGUAGE:CXX>:-UPy_LIMITED_API>)

define_gpu_extension_target(
define_extension_target(
_flashmla_extension_C
DESTINATION vllm
LANGUAGE ${VLLM_GPU_LANG}

@@ -38,7 +38,7 @@ else()
FetchContent_Declare(
vllm-flash-attn
GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git
GIT_TAG a893712401d70362fbb299cd9c4b3476e8e9ed54
GIT_TAG 8e1b01d56210dc72030a2d0d41c2d8d266ba6309
GIT_PROGRESS TRUE
# Don't share the vllm-flash-attn build between build types
BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn
@@ -453,21 +453,20 @@ macro(override_gpu_arches GPU_ARCHES GPU_LANG GPU_SUPPORTED_ARCHES)
endmacro()

#
# Define a target named `GPU_MOD_NAME` for a single extension. The
# Define a target named `MOD_NAME` for a single extension. The
# arguments are:
#
# DESTINATION <dest> - Module destination directory.
# LANGUAGE <lang> - The GPU language for this module, e.g CUDA, HIP,
# etc.
# LANGUAGE <lang> - The language for this module, e.g. CUDA, HIP,
# CXX, etc.
# SOURCES <sources> - List of source files relative to CMakeLists.txt
# directory.
#
# Optional arguments:
#
# ARCHITECTURES <arches> - A list of target GPU architectures in cmake
# format.
# Refer `CMAKE_CUDA_ARCHITECTURES` documentation
# and `CMAKE_HIP_ARCHITECTURES` for more info.
# ARCHITECTURES <arches> - A list of target architectures in cmake format.
# For GPU, refer to CMAKE_CUDA_ARCHITECTURES and
# CMAKE_HIP_ARCHITECTURES for more info.
# ARCHITECTURES will use cmake's defaults if
# not provided.
# COMPILE_FLAGS <flags> - Extra compiler flags passed to NVCC/hip.
@@ -478,63 +477,61 @@ endmacro()
#
# Note: optimization level/debug info is set via cmake build type.
#
function (define_gpu_extension_target GPU_MOD_NAME)
function (define_extension_target MOD_NAME)
cmake_parse_arguments(PARSE_ARGV 1
GPU
ARG
"WITH_SOABI"
"DESTINATION;LANGUAGE;USE_SABI"
"SOURCES;ARCHITECTURES;COMPILE_FLAGS;INCLUDE_DIRECTORIES;LIBRARIES")

# Add hipify preprocessing step when building with HIP/ROCm.
if (GPU_LANGUAGE STREQUAL "HIP")
hipify_sources_target(GPU_SOURCES ${GPU_MOD_NAME} "${GPU_SOURCES}")
if (ARG_LANGUAGE STREQUAL "HIP")
hipify_sources_target(ARG_SOURCES ${MOD_NAME} "${ARG_SOURCES}")
endif()

if (GPU_WITH_SOABI)
set(GPU_WITH_SOABI WITH_SOABI)
if (ARG_WITH_SOABI)
set(SOABI_KEYWORD WITH_SOABI)
else()
set(GPU_WITH_SOABI)
set(SOABI_KEYWORD "")
endif()

if (GPU_USE_SABI)
Python_add_library(${GPU_MOD_NAME} MODULE USE_SABI ${GPU_USE_SABI} ${GPU_WITH_SOABI} "${GPU_SOURCES}")
if (ARG_USE_SABI)
Python_add_library(${MOD_NAME} MODULE USE_SABI ${ARG_USE_SABI} ${SOABI_KEYWORD} "${ARG_SOURCES}")
else()
Python_add_library(${GPU_MOD_NAME} MODULE ${GPU_WITH_SOABI} "${GPU_SOURCES}")
Python_add_library(${MOD_NAME} MODULE ${SOABI_KEYWORD} "${ARG_SOURCES}")
endif()

if (GPU_LANGUAGE STREQUAL "HIP")
if (ARG_LANGUAGE STREQUAL "HIP")
# Make this target dependent on the hipify preprocessor step.
add_dependencies(${GPU_MOD_NAME} hipify${GPU_MOD_NAME})
add_dependencies(${MOD_NAME} hipify${MOD_NAME})
# Make sure we include the hipified versions of the headers, and avoid conflicts with the ones in the original source folder
target_include_directories(${GPU_MOD_NAME} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/csrc
${GPU_INCLUDE_DIRECTORIES})
target_include_directories(${MOD_NAME} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/csrc
${ARG_INCLUDE_DIRECTORIES})
else()
target_include_directories(${GPU_MOD_NAME} PRIVATE csrc
${GPU_INCLUDE_DIRECTORIES})
target_include_directories(${MOD_NAME} PRIVATE csrc
${ARG_INCLUDE_DIRECTORIES})
endif()

if (GPU_ARCHITECTURES)
set_target_properties(${GPU_MOD_NAME} PROPERTIES
${GPU_LANGUAGE}_ARCHITECTURES "${GPU_ARCHITECTURES}")
if (ARG_ARCHITECTURES)
set_target_properties(${MOD_NAME} PROPERTIES
${ARG_LANGUAGE}_ARCHITECTURES "${ARG_ARCHITECTURES}")
endif()

target_compile_options(${MOD_NAME} PRIVATE
$<$<COMPILE_LANGUAGE:${ARG_LANGUAGE}>:${ARG_COMPILE_FLAGS}>)

target_compile_options(${GPU_MOD_NAME} PRIVATE
$<$<COMPILE_LANGUAGE:${GPU_LANGUAGE}>:${GPU_COMPILE_FLAGS}>)
target_compile_definitions(${MOD_NAME} PRIVATE
"-DTORCH_EXTENSION_NAME=${MOD_NAME}")

target_compile_definitions(${GPU_MOD_NAME} PRIVATE
"-DTORCH_EXTENSION_NAME=${GPU_MOD_NAME}")

target_link_libraries(${GPU_MOD_NAME} PRIVATE torch ${GPU_LIBRARIES})
target_link_libraries(${MOD_NAME} PRIVATE torch ${ARG_LIBRARIES})

# Don't use `TORCH_LIBRARIES` for CUDA since it pulls in a bunch of
# dependencies that are not necessary and may not be installed.
if (GPU_LANGUAGE STREQUAL "CUDA")
target_link_libraries(${GPU_MOD_NAME} PRIVATE CUDA::cudart CUDA::cuda_driver)
if (ARG_LANGUAGE STREQUAL "CUDA")
target_link_libraries(${MOD_NAME} PRIVATE torch CUDA::cudart CUDA::cuda_driver ${ARG_LIBRARIES})
else()
target_link_libraries(${GPU_MOD_NAME} PRIVATE ${TORCH_LIBRARIES})
target_link_libraries(${MOD_NAME} PRIVATE torch ${TORCH_LIBRARIES} ${ARG_LIBRARIES})
endif()

install(TARGETS ${GPU_MOD_NAME} LIBRARY DESTINATION ${GPU_DESTINATION} COMPONENT ${GPU_MOD_NAME})
install(TARGETS ${MOD_NAME} LIBRARY DESTINATION ${ARG_DESTINATION} COMPONENT ${MOD_NAME})
endfunction()
@@ -46,6 +46,32 @@ __global__ void merge_attn_states_kernel(
s_lse = std::isinf(s_lse) ? -std::numeric_limits<float>::infinity() : s_lse;

const float max_lse = fmaxf(p_lse, s_lse);

/* In certain edge cases, MLA can produce p_lse = s_lse = -inf;
continuing the pipeline then yields NaN. Root cause: with chunked prefill
a batch may be split into two chunks; if a request in that batch has no
prefix hit, every LSE entry for that request's position is -inf, and at
this moment we merge cross-attention at first. For now we simply emit
prefix_output (expected to be all zeros) and prefix_lse (-inf) to fix
this problem.
*/
if (std::isinf(max_lse)) {
if (pack_offset < head_size) {
// Pack 128b load
pack_128b_t p_out_pack = reinterpret_cast<const pack_128b_t*>(
prefix_head_ptr)[pack_offset / pack_size];

// Pack 128b storage
reinterpret_cast<pack_128b_t*>(output_head_ptr)[pack_offset / pack_size] =
p_out_pack;
}
// We only need to write to output_lse once per head.
if (output_lse != nullptr && pack_idx == 0) {
output_lse[head_idx * num_tokens + token_idx] = max_lse;
}
return;
}

p_lse = p_lse - max_lse;
s_lse = s_lse - max_lse;
const float p_se = expf(p_lse);
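The early-return path above falls out of log-sum-exp algebra: when both partial LSEs are -inf, both softmax denominators are zero, so the usual rescaling would produce NaN. A minimal sketch of the merge rule (pure Python, names illustrative):

```python
import math

def merge_lse(p_lse: float, s_lse: float) -> float:
    """Merged LSE of two partial attention results; -inf means 'no contribution'."""
    max_lse = max(p_lse, s_lse)
    if math.isinf(max_lse) and max_lse < 0:
        # Both partials are empty: keep -inf and pass the prefix output through,
        # mirroring the kernel's early return.
        return max_lse
    return max_lse + math.log(math.exp(p_lse - max_lse) + math.exp(s_lse - max_lse))

assert merge_lse(float("-inf"), float("-inf")) == float("-inf")
```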
@@ -24,6 +24,8 @@ struct SSMParamsBase {
int64_t pad_slot_id;

bool delta_softplus;
bool cache_enabled;
int block_size;

index_t A_d_stride;
index_t A_dstate_stride;
@@ -46,8 +48,9 @@ struct SSMParamsBase {
index_t out_z_batch_stride;
index_t out_z_d_stride;
index_t ssm_states_batch_stride;
index_t ssm_states_dim_stride;
index_t ssm_states_dim_stride;
index_t ssm_states_dstate_stride;
index_t cache_indices_stride;

// Common data pointers.
void *__restrict__ A_ptr;
@@ -66,6 +69,9 @@ struct SSMParamsBase {
void *__restrict__ cache_indices_ptr;
void *__restrict__ has_initial_state_ptr;

void *__restrict__ block_idx_first_scheduled_token_ptr; // (batch,) - first block to write
void *__restrict__ block_idx_last_scheduled_token_ptr; // (batch,) - last block to write
void *__restrict__ initial_state_idx_ptr; // (batch,) - index of the initial state to use
};

@@ -119,7 +119,7 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {

const int* cache_indices = params.cache_indices_ptr == nullptr ? nullptr
: reinterpret_cast<int *>(params.cache_indices_ptr);
const int cache_index = cache_indices == nullptr ? batch_id : cache_indices[batch_id];
const int cache_index = cache_indices == nullptr ? batch_id : cache_indices[batch_id];
// cache_index == params.pad_slot_id is defined as padding, so we exit early
if (cache_index == params.pad_slot_id){
return;
@@ -133,9 +133,18 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {
input_t *Bvar = reinterpret_cast<input_t *>(params.B_ptr) + sequence_start_index * params.B_batch_stride + group_id * params.B_group_stride;
weight_t *C = reinterpret_cast<weight_t *>(params.C_ptr) + dim_id * kNRows * params.C_d_stride;
input_t *Cvar = reinterpret_cast<input_t *>(params.C_ptr) + sequence_start_index * params.C_batch_stride + group_id * params.C_group_stride;
typename Ktraits::state_t *ssm_states = reinterpret_cast<typename Ktraits::state_t *>(params.ssm_states_ptr) +
cache_index * params.ssm_states_batch_stride +
dim_id * kNRows * params.ssm_states_dim_stride;

typename Ktraits::state_t *ssm_states;
if (params.cache_enabled) {
// APC mode: ssm_states points to the base, we'll use absolute cache slots later
ssm_states = reinterpret_cast<typename Ktraits::state_t *>(params.ssm_states_ptr) +
dim_id * kNRows * params.ssm_states_dim_stride;
} else {
// Non-APC mode: offset by cache_index as before
ssm_states = reinterpret_cast<typename Ktraits::state_t *>(params.ssm_states_ptr) +
cache_index * params.ssm_states_batch_stride +
dim_id * kNRows * params.ssm_states_dim_stride;
}

float D_val[kNRows] = {0};
if (params.D_ptr != nullptr) {
@@ -159,7 +168,22 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {
// }

constexpr int kChunkSize = kNThreads * kNItems;
const int n_chunks = (seqlen + 2048 - 1) / 2048;

// Use block_size for chunking when APC is enabled, otherwise use 2048 for backwards compatibility
const int iteration_chunk_size = params.cache_enabled ? params.block_size : 2048;
const int n_chunks = (seqlen + iteration_chunk_size - 1) / iteration_chunk_size;

const int* batch_cache_indices = cache_indices != nullptr ?
cache_indices + batch_id * params.cache_indices_stride : nullptr;
const int* block_idx_first_scheduled = params.block_idx_first_scheduled_token_ptr != nullptr ?
reinterpret_cast<const int*>(params.block_idx_first_scheduled_token_ptr) : nullptr;
const int* block_idx_last_scheduled = params.block_idx_last_scheduled_token_ptr != nullptr ?
reinterpret_cast<const int*>(params.block_idx_last_scheduled_token_ptr) : nullptr;
const int* initial_state_idx = params.initial_state_idx_ptr != nullptr ?
reinterpret_cast<const int*>(params.initial_state_idx_ptr) : nullptr;

const size_t load_cache_slot = params.cache_enabled && batch_cache_indices != nullptr ? batch_cache_indices[initial_state_idx[batch_id]] : cache_index;

for (int chunk = 0; chunk < n_chunks; ++chunk) {
input_t u_vals[kNRows][kNItems], delta_vals_load[kNRows][kNItems];

@@ -219,7 +243,7 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {
if constexpr (kIsVariableC) {
auto &smem_load_weight_C = !kIsVariableB ? smem_load_weight : smem_load_weight1;
load_weight<Ktraits>(Cvar + state_idx * params.C_dstate_stride, C_vals,
smem_load_weight_C, (seqlen - chunk * kChunkSize) * (1 ));
smem_load_weight_C, (seqlen - chunk * kChunkSize) * (1));
if constexpr (!kIsVariableB) {
#pragma unroll
for (int r = 0; r < kNRows; ++r) {
@@ -242,7 +266,6 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {
for (int i = 0; i < kNItems; ++i) {
thread_data[i] = make_float2(exp2f(delta_vals[r][i] * A_val[r]),
!kIsVariableB ? delta_u_vals[r][i] : B_vals[i] * delta_u_vals[r][i]);

if (seqlen % (kNItems * kNThreads) != 0) { // So that the last state is correct
if (threadIdx.x * kNItems + i >= seqlen - chunk * kChunkSize) {
thread_data[i] = make_float2(1.f, 0.f);
@@ -250,8 +273,24 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {
}
}
// Initialize running total

scan_t running_prefix = chunk > 0 ? smem_running_prefix[state_idx + r * MAX_DSTATE] : make_float2(1.0, has_initial_state ? float(ssm_states[state_idx * params.ssm_states_dstate_stride]): 0.0);
scan_t running_prefix;
if (chunk > 0) {
running_prefix = smem_running_prefix[state_idx + r * MAX_DSTATE];
} else {
// Load initial state
if (params.cache_enabled && has_initial_state && batch_cache_indices != nullptr) {
size_t state_offset = load_cache_slot * params.ssm_states_batch_stride +
r * params.ssm_states_dim_stride +
state_idx * params.ssm_states_dstate_stride;
running_prefix = make_float2(1.0, float(ssm_states[state_offset]));
} else if (has_initial_state) {
// Non-APC mode: load from current batch position
running_prefix = make_float2(1.0, float(ssm_states[state_idx * params.ssm_states_dstate_stride]));
} else {
// No initial state
running_prefix = make_float2(1.0, 0.0);
}
}

SSMScanPrefixCallbackOp<weight_t> prefix_op(running_prefix);
typename Ktraits::BlockScanT(smem_scan).InclusiveScan(
@@ -260,8 +299,25 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {
// There's a syncthreads in the scan op, so we don't need to sync here.
// Unless there's only 1 warp, but then it's the same thread (0) reading and writing.
if (threadIdx.x == 0) {
smem_running_prefix[state_idx] = prefix_op.running_prefix;
if (chunk == n_chunks - 1) {
smem_running_prefix[state_idx + r * MAX_DSTATE] = prefix_op.running_prefix;

// Store state at the end of each chunk when cache is enabled
if (params.cache_enabled && batch_cache_indices != nullptr) {

size_t cache_slot;
if (chunk == n_chunks - 1) {
cache_slot = batch_cache_indices[block_idx_last_scheduled[batch_id]];
} else {
cache_slot = batch_cache_indices[block_idx_first_scheduled[batch_id] + chunk];
}

size_t state_offset = cache_slot * params.ssm_states_batch_stride +
r * params.ssm_states_dim_stride +
state_idx * params.ssm_states_dstate_stride;

ssm_states[state_offset] = typename Ktraits::state_t(prefix_op.running_prefix.y);
} else if (!params.cache_enabled && chunk == n_chunks - 1) {
// Non-APC mode: store only final state at current batch position
ssm_states[state_idx * params.ssm_states_dstate_stride] = typename Ktraits::state_t(prefix_op.running_prefix.y);
}
}
@@ -274,7 +330,6 @@ void selective_scan_fwd_kernel(SSMParamsBase params) {
}
}
}

input_t *out = reinterpret_cast<input_t *>(params.out_ptr) + sequence_start_index * params.out_batch_stride
+ dim_id * kNRows * params.out_d_stride + chunk * kChunkSize;
__syncthreads();
@@ -346,7 +401,9 @@ template<typename input_t, typename weight_t, typename state_t>
void selective_scan_fwd_cuda(SSMParamsBase &params, cudaStream_t stream) {

#ifndef USE_ROCM
if (params.seqlen <= 128) {
if (params.cache_enabled && params.block_size == 1024) {
selective_scan_fwd_launch<64, 16, input_t, weight_t, state_t>(params, stream);
} else if (params.seqlen <= 128) {
selective_scan_fwd_launch<32, 4, input_t, weight_t, state_t>(params, stream);
} else if (params.seqlen <= 256) {
selective_scan_fwd_launch<32, 8, input_t, weight_t, state_t>(params, stream);
@@ -358,7 +415,9 @@ void selective_scan_fwd_cuda(SSMParamsBase &params, cudaStream_t stream) {
selective_scan_fwd_launch<128, 16, input_t, weight_t, state_t>(params, stream);
}
#else
if (params.seqlen <= 256) {
if (params.cache_enabled && params.block_size == 1024) {
selective_scan_fwd_launch<64, 16, input_t, weight_t, state_t>(params, stream);
} else if (params.seqlen <= 256) {
selective_scan_fwd_launch<64, 4, input_t, weight_t, state_t>(params, stream);
} else if (params.seqlen <= 512) {
selective_scan_fwd_launch<64, 8, input_t, weight_t, state_t>(params, stream);
@@ -437,13 +496,17 @@ void set_ssm_params_fwd(SSMParamsBase &params,
const std::optional<at::Tensor>& D,
const std::optional<at::Tensor>& delta_bias,
const torch::Tensor ssm_states,
bool has_z,
bool has_z,
bool delta_softplus,
const std::optional<at::Tensor>& query_start_loc,
const std::optional<at::Tensor>& cache_indices,
const std::optional<at::Tensor>& has_initial_state,
bool varlen,
int64_t pad_slot_id) {
int64_t pad_slot_id,
int64_t block_size,
const std::optional<torch::Tensor> &block_idx_first_scheduled_token,
const std::optional<torch::Tensor> &block_idx_last_scheduled_token,
const std::optional<torch::Tensor> &initial_state_idx) {

// Reset the parameters
memset(&params, 0, sizeof(params));
@@ -477,6 +540,14 @@ void set_ssm_params_fwd(SSMParamsBase &params,
params.cache_indices_ptr = cache_indices.has_value() ? cache_indices.value().data_ptr() : nullptr;
params.has_initial_state_ptr = has_initial_state.has_value() ? has_initial_state.value().data_ptr() : nullptr;

// Set cache parameters - cache is enabled if we have direct cache writing params
params.cache_enabled = block_idx_first_scheduled_token.has_value();
params.block_size = static_cast<int>(block_size);

// Set direct cache writing pointers
params.block_idx_first_scheduled_token_ptr = block_idx_first_scheduled_token.has_value() ? block_idx_first_scheduled_token.value().data_ptr() : nullptr;
params.block_idx_last_scheduled_token_ptr = block_idx_last_scheduled_token.has_value() ? block_idx_last_scheduled_token.value().data_ptr() : nullptr;
params.initial_state_idx_ptr = initial_state_idx.has_value() ? initial_state_idx.value().data_ptr() : nullptr;

// All stride are in elements, not bytes.
params.A_d_stride = A.stride(0);
@@ -504,9 +575,11 @@ void set_ssm_params_fwd(SSMParamsBase &params,
params.out_d_stride = out.stride(0);

params.ssm_states_batch_stride = ssm_states.stride(0);
params.ssm_states_dim_stride = ssm_states.stride(1);
params.ssm_states_dim_stride = ssm_states.stride(1);
params.ssm_states_dstate_stride = ssm_states.stride(2);

params.cache_indices_stride = cache_indices.has_value() ? cache_indices.value().stride(0) : 0;

}
else{
if (!is_variable_B) {
@@ -537,8 +610,10 @@ void set_ssm_params_fwd(SSMParamsBase &params,
params.out_d_stride = out.stride(1);

params.ssm_states_batch_stride = ssm_states.stride(0);
params.ssm_states_dim_stride = ssm_states.stride(1);
params.ssm_states_dim_stride = ssm_states.stride(1);
params.ssm_states_dstate_stride = ssm_states.stride(2);

params.cache_indices_stride = cache_indices.has_value() ? cache_indices.value().stride(0) : 0;
}
}

@@ -554,7 +629,11 @@ void selective_scan_fwd(const torch::Tensor &u, const torch::Tensor &delta,
const torch::Tensor &ssm_states,
// used to identify padding entries if cache_indices provided
// in case of padding, the kernel will return early
int64_t pad_slot_id) {
int64_t pad_slot_id,
int64_t block_size,
const std::optional<torch::Tensor> &block_idx_first_scheduled_token,
const std::optional<torch::Tensor> &block_idx_last_scheduled_token,
const std::optional<torch::Tensor> &initial_state_idx) {
auto input_type = u.scalar_type();
auto weight_type = A.scalar_type();
TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
@@ -646,7 +725,16 @@ void selective_scan_fwd(const torch::Tensor &u, const torch::Tensor &delta,
auto cache_indices_ = cache_indices.value();
TORCH_CHECK(cache_indices_.scalar_type() == at::ScalarType::Int);
TORCH_CHECK(cache_indices_.is_cuda());
CHECK_SHAPE(cache_indices_, batch_size);

// cache_indices can be either 1D (batch_size,) for non-APC mode
// or 2D (batch_size, max_positions) for APC mode
const bool is_apc_mode = block_idx_first_scheduled_token.has_value();
if (is_apc_mode) {
TORCH_CHECK(cache_indices_.dim() == 2, "cache_indices must be 2D for APC mode");
TORCH_CHECK(cache_indices_.size(0) == batch_size, "cache_indices first dimension must match batch_size");
} else {
CHECK_SHAPE(cache_indices_, batch_size);
}
}

@@ -686,7 +774,11 @@ void selective_scan_fwd(const torch::Tensor &u, const torch::Tensor &delta,
cache_indices,
has_initial_state,
varlen,
pad_slot_id
pad_slot_id,
block_size,
block_idx_first_scheduled_token,
block_idx_last_scheduled_token,
initial_state_idx
);
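The chunking change above only alters how many scan iterations the kernel runs; the arithmetic is a plain ceiling division. A small sketch with illustrative sequence lengths:

```python
def n_chunks(seqlen: int, cache_enabled: bool, block_size: int) -> int:
    # Mirrors the kernel: APC mode chunks by the cache block size,
    # otherwise the historical fixed chunk of 2048 is kept.
    iteration_chunk_size = block_size if cache_enabled else 2048
    return (seqlen + iteration_chunk_size - 1) // iteration_chunk_size

assert n_chunks(3000, cache_enabled=False, block_size=1024) == 2  # ceil(3000 / 2048)
assert n_chunks(3000, cache_enabled=True, block_size=1024) == 3   # ceil(3000 / 1024)
```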
@@ -87,30 +87,23 @@ torch::Tensor dynamic_4bit_int_moe_cpu(
const int64_t g_eff_13 = (group_size != -1) ? group_size : H;
const int64_t g_eff_2 = (group_size != -1) ? group_size : I;

// Per-expert outputs filled in parallel
std::vector<torch::Tensor> y_list(E);
y_list.resize(E);
auto X_all = x_c.index_select(/*dim=*/0, expert_tokens);
if (apply_router_weight_on_input) {
X_all = X_all.mul(expert_gates.unsqueeze(1));
}
auto Y_all = at::empty({offsets[E], H}, x_c.options());

at::parallel_for(0, E, 1, [&](int64_t e_begin, int64_t e_end) {
c10::InferenceMode guard;
for (int64_t e = e_begin; e < e_end; ++e) {
const int64_t te = counts[e];
if (te == 0) {
y_list[e] = at::empty({0, H}, x_c.options());
continue;
}

const int64_t start = offsets[e];

auto sel_tokens =
expert_tokens.narrow(/*dim=*/0, /*start=*/start, /*length=*/te);
auto gates_e =
expert_gates.narrow(/*dim=*/0, /*start=*/start, /*length=*/te);

auto x_e = x_c.index_select(/*dim=*/0, sel_tokens);

if (apply_router_weight_on_input) {
x_e = x_e.mul(gates_e.unsqueeze(1));
}
auto x_e = X_all.narrow(/*dim=*/0, /*start=*/start, /*length=*/te);

auto w13_e = w13_packed.select(/*dim=*/0, e);
auto w2_e = w2_packed.select(/*dim=*/0, e);
@@ -137,17 +130,15 @@ torch::Tensor dynamic_4bit_int_moe_cpu(
// W2
auto y = mm(act, w2_e, g_eff_2, /*in_features=*/I, /*out_features=*/H);

if (!apply_router_weight_on_input) {
y = y.mul(gates_e.unsqueeze(1));
}

// Store per-expert result
y_list[e] = y;
Y_all.narrow(/*dim=*/0, /*start=*/start, /*length=*/te).copy_(y);
}
});

// Concatenate all expert outputs to match expert_tokens order
auto Y_all = at::cat(y_list, /*dim=*/0);
if (!apply_router_weight_on_input) {
Y_all = Y_all.mul(expert_gates.unsqueeze(1));
}

auto out = at::zeros({T, H}, x.options());
out =
at::index_add(out, /*dim=*/0, /*index=*/expert_tokens, /*source=*/Y_all);
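The CPU MoE change replaces per-expert tensor lists with a single preallocated output that each expert writes into by offset, then scatters back per token. A hedged sketch of that gather/scatter bookkeeping, with made-up sizes and a dummy stand-in for the quantized GEMMs (not the extension's actual API):

```python
import torch

T, H, E = 6, 4, 2                                  # tokens, hidden size, experts
x = torch.randn(T, H)
expert_tokens = torch.tensor([0, 2, 5, 1, 3, 4])   # token ids grouped by expert
offsets = [0, 3, 6]                                # expert e owns rows offsets[e]:offsets[e+1]

X_all = x.index_select(0, expert_tokens)           # gather once up front
Y_all = torch.empty_like(X_all)                    # preallocated, replaces per-expert list
for e in range(E):
    start, length = offsets[e], offsets[e + 1] - offsets[e]
    x_e = X_all.narrow(0, start, length)
    Y_all.narrow(0, start, length).copy_(x_e * 2)  # stand-in for the expert GEMMs

out = torch.zeros(T, H).index_add(0, expert_tokens, Y_all)  # scatter back per token
```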
@@ -427,11 +427,29 @@ __device__ inline bool is_finite(const T val) {
#endif
}

// Scoring function enums
enum ScoringFunc {
SCORING_NONE = 0, // no activation function
SCORING_SIGMOID = 1 // apply sigmoid
};

// Efficient sigmoid approximation from TensorRT-LLM
__device__ inline float sigmoid_accurate(float x) {
return 0.5f * tanhf(0.5f * x) + 0.5f;
}

template <typename T>
__device__ void topk_with_k2(T* output, T const* input,
__device__ inline T apply_sigmoid(T val) {
float f = cuda_cast<float, T>(val);
return cuda_cast<T, float>(sigmoid_accurate(f));
}

template <typename T>
__device__ void topk_with_k2(T* output, T const* input, T const* bias,
cg::thread_block_tile<32> const& tile,
int32_t const lane_id,
int const num_experts_per_group) {
int const num_experts_per_group,
int const scoring_func) {
// Get the top2 per thread
T largest = neg_inf<T>();
T second_largest = neg_inf<T>();
@@ -439,6 +457,12 @@ __device__ void topk_with_k2(T* output, T const* input,
if (num_experts_per_group > WARP_SIZE) {
for (int i = lane_id; i < num_experts_per_group; i += WARP_SIZE) {
T value = input[i];
// Apply scoring function if needed
if (scoring_func == SCORING_SIGMOID) {
value = apply_sigmoid(value);
}
value = value + bias[i];

if (value > largest) {
second_largest = largest;
largest = value;
@@ -448,7 +472,13 @@
}
} else {
for (int i = lane_id; i < num_experts_per_group; i += WARP_SIZE) {
largest = input[i];
T value = input[i];
// Apply scoring function if needed
if (scoring_func == SCORING_SIGMOID) {
value = apply_sigmoid(value);
}
value = value + bias[i];
largest = value;
}
}

@@ -472,17 +502,21 @@
}

template <typename T>
__global__ void topk_with_k2_kernel(T* output, T* input,
__global__ void topk_with_k2_kernel(T* output, T* input, T const* bias,
int64_t const num_tokens,
int64_t const num_cases,
int64_t const n_group,
int64_t const num_experts_per_group) {
int64_t const num_experts_per_group,
int const scoring_func) {
int32_t warp_id = threadIdx.x / WARP_SIZE;
int32_t lane_id = threadIdx.x % WARP_SIZE;

int32_t case_id = blockIdx.x * NUM_WARPS_PER_BLOCK + warp_id;
if (case_id < num_cases) {
input += case_id * num_experts_per_group;
// bias is per expert group, offset to current group
int32_t group_id = case_id % n_group;
T const* group_bias = bias + group_id * num_experts_per_group;
output += case_id;

cg::thread_block block = cg::this_thread_block();
@@ -491,7 +525,8 @@ __global__ void topk_with_k2_kernel(T* output, T* input,
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
asm volatile("griddepcontrol.wait;");
#endif
topk_with_k2(output, input, tile, lane_id, num_experts_per_group);
topk_with_k2(output, input, group_bias, tile, lane_id,
num_experts_per_group, scoring_func);
}
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
asm volatile("griddepcontrol.launch_dependents;");
@@ -500,16 +535,15 @@ __global__ void topk_with_k2_kernel(T* output, T* input,

template <typename T, typename IdxT>
__global__ void group_idx_and_topk_idx_kernel(
T* scores, T const* group_scores, T* topk_values, IdxT* topk_indices,
T* scores_with_bias, int64_t const num_tokens, int64_t const n_group,
T* scores, T const* group_scores, float* topk_values, IdxT* topk_indices,
T const* bias, int64_t const num_tokens, int64_t const n_group,
int64_t const topk_group, int64_t const topk, int64_t const num_experts,
int64_t const num_experts_per_group, bool renormalize,
double routed_scaling_factor) {
double routed_scaling_factor, int scoring_func) {
int32_t warp_id = threadIdx.x / WARP_SIZE;
int32_t lane_id = threadIdx.x % WARP_SIZE;
int32_t case_id =
blockIdx.x * NUM_WARPS_PER_BLOCK + warp_id; // one per token
scores_with_bias += case_id * num_experts;
scores += case_id * num_experts;
group_scores += case_id * n_group;
topk_values += case_id * topk;
@@ -577,10 +611,16 @@ __global__ void group_idx_and_topk_idx_kernel(
int32_t offset = i_group * num_experts_per_group;
for (int32_t i = lane_id; i < align_num_experts_per_group;
i += WARP_SIZE) {
T candidates = (i < num_experts_per_group) &&
is_finite(scores_with_bias[offset + i])
? scores_with_bias[offset + i]
: neg_inf<T>();
T candidates = neg_inf<T>();
if (i < num_experts_per_group) {
// Apply scoring function (if any) and add bias
T input = scores[offset + i];
if (is_finite(input)) {
T score = (scoring_func == SCORING_SIGMOID) ? apply_sigmoid(input)
: input;
candidates = score + bias[offset + i];
}
}
queue.add(candidates, offset + i);
}
if (group_scores[i_group] == topk_group_value) {
@@ -602,11 +642,12 @@ __global__ void group_idx_and_topk_idx_kernel(
for (int i = lane_id;
i < warp_topk::round_up_to_multiple_of<WARP_SIZE>(topk);
i += WARP_SIZE) {
T value =
i < topk
? scores[s_topk_idx[i]]
: cuda_cast<T, float>(0.0f); // Load the valid value of expert
T value = cuda_cast<T, float>(0.0f);
if (i < topk) {
// Load the score value (without bias) for normalization
T input = scores[s_topk_idx[i]];
value =
(scoring_func == SCORING_SIGMOID) ? apply_sigmoid(input) : input;
s_topk_value[i] = value;
}
topk_sum +=
@@ -627,12 +668,12 @@ __global__ void group_idx_and_topk_idx_kernel(
value = cuda_cast<float, T>(s_topk_value[i]) * routed_scaling_factor;
}
topk_indices[i] = s_topk_idx[i];
topk_values[i] = cuda_cast<T, float>(value);
topk_values[i] = value;
}
} else {
for (int i = lane_id; i < topk; i += WARP_SIZE) {
topk_indices[i] = i;
topk_values[i] = cuda_cast<T, float>(1.0f / topk);
topk_values[i] = 1.0f / topk;
}
}
// Note: when if_proceed_next_topk==false, choose the first 8 experts as the
@@ -644,12 +685,12 @@ __global__ void group_idx_and_topk_idx_kernel(
}

template <typename T, typename IdxT>
void invokeNoAuxTc(T* scores, T* group_scores, T* topk_values,
IdxT* topk_indices, T* scores_with_bias,
int64_t const num_tokens, int64_t const num_experts,
int64_t const n_group, int64_t const topk_group,
int64_t const topk, bool const renormalize,
double const routed_scaling_factor, bool enable_pdl = false,
void invokeNoAuxTc(T* scores, T* group_scores, float* topk_values,
IdxT* topk_indices, T const* bias, int64_t const num_tokens,
int64_t const num_experts, int64_t const n_group,
int64_t const topk_group, int64_t const topk,
bool const renormalize, double const routed_scaling_factor,
int const scoring_func, bool enable_pdl = false,
cudaStream_t const stream = 0) {
int64_t num_cases = num_tokens * n_group;
int64_t topk_with_k2_num_blocks = (num_cases - 1) / NUM_WARPS_PER_BLOCK + 1;
@@ -664,8 +705,9 @@ void invokeNoAuxTc(T* scores, T* group_scores, T* topk_values,
attrs[0].val.programmaticStreamSerializationAllowed = enable_pdl;
config.numAttrs = 1;
config.attrs = attrs;
cudaLaunchKernelEx(&config, kernel_instance1, group_scores, scores_with_bias,
num_tokens, num_cases, n_group, num_experts / n_group);
cudaLaunchKernelEx(&config, kernel_instance1, group_scores, scores, bias,
num_tokens, num_cases, n_group, num_experts / n_group,
scoring_func);

int64_t topk_with_k_group_num_blocks =
(num_tokens - 1) / NUM_WARPS_PER_BLOCK + 1;
@@ -682,19 +724,18 @@ void invokeNoAuxTc(T* scores, T* group_scores, T* topk_values,
config.numAttrs = 1;
config.attrs = attrs;
cudaLaunchKernelEx(&config, kernel_instance2, scores, group_scores,
topk_values, topk_indices, scores_with_bias, num_tokens,
n_group, topk_group, topk, num_experts,
num_experts / n_group, renormalize, routed_scaling_factor);
topk_values, topk_indices, bias, num_tokens, n_group,
topk_group, topk, num_experts, num_experts / n_group,
renormalize, routed_scaling_factor, scoring_func);
}

#define INSTANTIATE_NOAUX_TC(T, IdxT) \
template void invokeNoAuxTc<T, IdxT>( \
T * scores, T * group_scores, T * topk_values, IdxT * topk_indices, \
T * scores_with_bias, int64_t const num_tokens, \
int64_t const num_experts, int64_t const n_group, \
int64_t const topk_group, int64_t const topk, bool const renormalize, \
double const routed_scaling_factor, bool enable_pdl, \
cudaStream_t const stream);
T * scores, T * group_scores, float* topk_values, IdxT* topk_indices, \
T const* bias, int64_t const num_tokens, int64_t const num_experts, \
int64_t const n_group, int64_t const topk_group, int64_t const topk, \
bool const renormalize, double const routed_scaling_factor, \
int const scoring_func, bool enable_pdl, cudaStream_t const stream);

INSTANTIATE_NOAUX_TC(float, int32_t);
INSTANTIATE_NOAUX_TC(half, int32_t);
@@ -703,28 +744,32 @@ INSTANTIATE_NOAUX_TC(__nv_bfloat16, int32_t);
} // namespace vllm

std::tuple<torch::Tensor, torch::Tensor> grouped_topk(
torch::Tensor const& scores, torch::Tensor const& scores_with_bias,
int64_t n_group, int64_t topk_group, int64_t topk, bool renormalize,
double routed_scaling_factor) {
auto data_type = scores_with_bias.scalar_type();
auto input_size = scores_with_bias.sizes();
torch::Tensor const& scores, int64_t n_group, int64_t topk_group,
int64_t topk, bool renormalize, double routed_scaling_factor,
torch::Tensor const& bias, int64_t scoring_func = 0) {
auto data_type = scores.scalar_type();
auto input_size = scores.sizes();
int64_t num_tokens = input_size[0];
int64_t num_experts = input_size[1];
TORCH_CHECK(input_size.size() == 2, "scores_with_bias must be a 2D Tensor");
TORCH_CHECK(input_size.size() == 2, "scores must be a 2D Tensor");
TORCH_CHECK(num_experts % n_group == 0,
"num_experts should be divisible by n_group");
TORCH_CHECK(n_group <= 32,
"n_group should be smaller than or equal to 32 for now");
TORCH_CHECK(topk <= 32, "topk should be smaller than or equal to 32 for now");
TORCH_CHECK(scoring_func == vllm::moe::SCORING_NONE ||
scoring_func == vllm::moe::SCORING_SIGMOID,
"scoring_func must be SCORING_NONE (0) or SCORING_SIGMOID (1)");

torch::Tensor group_scores = torch::empty(
{num_tokens, n_group}, torch::dtype(data_type).device(torch::kCUDA));
// Always output float32 for topk_values (eliminates Python-side conversion)
torch::Tensor topk_values = torch::empty(
{num_tokens, topk}, torch::dtype(data_type).device(torch::kCUDA));
{num_tokens, topk}, torch::dtype(torch::kFloat32).device(torch::kCUDA));
torch::Tensor topk_indices = torch::empty(
{num_tokens, topk}, torch::dtype(torch::kInt32).device(torch::kCUDA));

auto stream = c10::cuda::getCurrentCUDAStream(scores_with_bias.get_device());
auto stream = c10::cuda::getCurrentCUDAStream(scores.get_device());

switch (data_type) {
case torch::kFloat16:
@@ -732,11 +777,11 @@ std::tuple<torch::Tensor, torch::Tensor> grouped_topk(
vllm::moe::invokeNoAuxTc<half, int32_t>(
reinterpret_cast<half*>(scores.mutable_data_ptr()),
reinterpret_cast<half*>(group_scores.mutable_data_ptr()),
reinterpret_cast<half*>(topk_values.mutable_data_ptr()),
reinterpret_cast<float*>(topk_values.mutable_data_ptr()),
reinterpret_cast<int32_t*>(topk_indices.mutable_data_ptr()),
reinterpret_cast<half*>(scores_with_bias.data_ptr()), num_tokens,
reinterpret_cast<half const*>(bias.data_ptr()), num_tokens,
num_experts, n_group, topk_group, topk, renormalize,
routed_scaling_factor, false, stream);
routed_scaling_factor, static_cast<int>(scoring_func), false, stream);
break;
case torch::kFloat32:
// Handle Float32
@@ -745,20 +790,20 @@ std::tuple<torch::Tensor, torch::Tensor> grouped_topk(
reinterpret_cast<float*>(group_scores.mutable_data_ptr()),
reinterpret_cast<float*>(topk_values.mutable_data_ptr()),
reinterpret_cast<int32_t*>(topk_indices.mutable_data_ptr()),
reinterpret_cast<float*>(scores_with_bias.data_ptr()), num_tokens,
reinterpret_cast<float const*>(bias.data_ptr()), num_tokens,
num_experts, n_group, topk_group, topk, renormalize,
routed_scaling_factor, false, stream);
routed_scaling_factor, static_cast<int>(scoring_func), false, stream);
break;
case torch::kBFloat16:
// Handle BFloat16
vllm::moe::invokeNoAuxTc<__nv_bfloat16, int32_t>(
reinterpret_cast<__nv_bfloat16*>(scores.mutable_data_ptr()),
reinterpret_cast<__nv_bfloat16*>(group_scores.mutable_data_ptr()),
reinterpret_cast<__nv_bfloat16*>(topk_values.mutable_data_ptr()),
reinterpret_cast<float*>(topk_values.mutable_data_ptr()),
reinterpret_cast<int32_t*>(topk_indices.mutable_data_ptr()),
reinterpret_cast<__nv_bfloat16*>(scores_with_bias.data_ptr()),
num_tokens, num_experts, n_group, topk_group, topk, renormalize,
routed_scaling_factor, false, stream);
reinterpret_cast<__nv_bfloat16 const*>(bias.data_ptr()), num_tokens,
num_experts, n_group, topk_group, topk, renormalize,
routed_scaling_factor, static_cast<int>(scoring_func), false, stream);
break;
default:
// Handle other data types
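The `sigmoid_accurate` helper used above is just the tanh form of the logistic function, and the new `scoring_func` path applies it to the raw score before the routing bias is added. A quick numerical check of that identity (pure Python, illustrative only; the enum values mirror the kernel's `SCORING_NONE`/`SCORING_SIGMOID`):

```python
import math

def sigmoid_accurate(x: float) -> float:
    # Same identity as the kernel: 0.5 * tanh(0.5 * x) + 0.5 == 1 / (1 + exp(-x))
    return 0.5 * math.tanh(0.5 * x) + 0.5

def scored(value: float, bias: float, scoring_func: int) -> float:
    # scoring_func: 0 = no activation, 1 = sigmoid, applied before adding the bias.
    score = sigmoid_accurate(value) if scoring_func == 1 else value
    return score + bias

for x in (-3.0, 0.0, 2.5):
    assert abs(sigmoid_accurate(x) - 1.0 / (1.0 + math.exp(-x))) < 1e-12
```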
@@ -28,11 +28,16 @@ __global__ void moe_lora_align_sum_kernel(
int64_t block_size, int num_experts, int max_loras, size_t numel,
int max_num_tokens_padded, int max_num_m_blocks,
int32_t* __restrict__ sorted_token_ids, int32_t* __restrict__ expert_ids,
int topk_num, int32_t* total_tokens_post_pad) {
int topk_num, int32_t* total_tokens_post_pad, int32_t* adapter_enabled,
int32_t* lora_ids) {
const size_t tokens_per_thread = div_ceil(numel, blockDim.x);
const size_t start_idx = threadIdx.x * tokens_per_thread;

int lora_id = blockIdx.x;
int lora_idx = blockIdx.x;
int lora_id = lora_ids[lora_idx];
if (lora_id == -1 || adapter_enabled[lora_id] == 0) {
return;
}
extern __shared__ int32_t shared_mem[];
int32_t* cumsum = shared_mem;
token_cnts_t* tokens_cnts = (token_cnts_t*)(shared_mem + num_experts + 1);
@@ -121,14 +126,13 @@ __global__ void moe_lora_align_sum_kernel(
}
}

void moe_lora_align_block_size(torch::Tensor topk_ids,
torch::Tensor token_lora_mapping,
int64_t num_experts, int64_t block_size,
int64_t max_loras, int64_t max_num_tokens_padded,
int64_t max_num_m_blocks,
torch::Tensor sorted_token_ids,
torch::Tensor expert_ids,
torch::Tensor num_tokens_post_pad) {
void moe_lora_align_block_size(
torch::Tensor topk_ids, torch::Tensor token_lora_mapping,
int64_t num_experts, int64_t block_size, int64_t max_loras,
int64_t max_num_tokens_padded, int64_t max_num_m_blocks,
torch::Tensor sorted_token_ids, torch::Tensor expert_ids,
torch::Tensor num_tokens_post_pad, torch::Tensor adapter_enabled,
torch::Tensor lora_ids) {
const int topk_num = topk_ids.size(1);

TORCH_CHECK(block_size > 0, "block_size should be greater than 0. ");
@@ -164,6 +168,7 @@ void moe_lora_align_block_size(torch::Tensor topk_ids,
max_loras, topk_ids.numel(), max_num_tokens_padded,
max_num_m_blocks, sorted_token_ids.data_ptr<int32_t>(),
expert_ids.data_ptr<int32_t>(), topk_num,
num_tokens_post_pad.data_ptr<int32_t>());
num_tokens_post_pad.data_ptr<int32_t>(),
adapter_enabled.data_ptr<int32_t>(), lora_ids.data_ptr<int32_t>());
});
}
@@ -20,14 +20,13 @@ void batched_moe_align_block_size(int64_t max_tokens_per_batch,
torch::Tensor expert_ids,
torch::Tensor num_tokens_post_pad);

void moe_lora_align_block_size(torch::Tensor topk_ids,
torch::Tensor token_lora_mapping,
int64_t num_experts, int64_t block_size,
int64_t max_loras, int64_t max_num_tokens_padded,
int64_t max_num_m_blocks,
torch::Tensor sorted_token_ids,
torch::Tensor expert_ids,
torch::Tensor num_tokens_post_pad);
void moe_lora_align_block_size(
torch::Tensor topk_ids, torch::Tensor token_lora_mapping,
int64_t num_experts, int64_t block_size, int64_t max_loras,
int64_t max_num_tokens_padded, int64_t max_num_m_blocks,
torch::Tensor sorted_token_ids, torch::Tensor expert_ids,
torch::Tensor num_tokens_post_pad, torch::Tensor adapter_enabled,
torch::Tensor lora_ids);
#ifndef USE_ROCM
torch::Tensor moe_wna16_gemm(torch::Tensor input, torch::Tensor output,
torch::Tensor b_qweight, torch::Tensor b_scales,
@@ -40,9 +39,9 @@ torch::Tensor moe_wna16_gemm(torch::Tensor input, torch::Tensor output,
int64_t BLOCK_SIZE_K, int64_t bit);

std::tuple<torch::Tensor, torch::Tensor> grouped_topk(
torch::Tensor const& scores, torch::Tensor const& scores_with_bias,
int64_t n_group, int64_t topk_group, int64_t topk, bool renormalize,
double routed_scaling_factor);
torch::Tensor const& scores, int64_t n_group, int64_t topk_group,
int64_t topk, bool renormalize, double routed_scaling_factor,
torch::Tensor const& bias, int64_t scoring_func);
#endif

bool moe_permute_unpermute_supported();

@@ -44,7 +44,9 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, m) {
" int max_num_m_blocks, "
" Tensor !sorted_token_ids,"
" Tensor !experts_ids,"
" Tensor !num_tokens_post_pad) -> () ");
" Tensor !num_tokens_post_pad,"
" Tensor !adapter_enabled,"
" Tensor !lora_ids) -> () ");
m.impl("moe_lora_align_block_size", torch::kCUDA, &moe_lora_align_block_size);

#ifndef USE_ROCM
@@ -105,9 +107,10 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, m) {

// Apply grouped topk routing to select experts.
m.def(
"grouped_topk(Tensor scores, Tensor scores_with_bias, int n_group, int "
"grouped_topk(Tensor scores, int n_group, int "
"topk_group, int topk, bool renormalize, float "
"routed_scaling_factor) -> (Tensor, Tensor)");
"routed_scaling_factor, Tensor bias, int scoring_func) -> (Tensor, "
"Tensor)");
m.impl("grouped_topk", torch::kCUDA, &grouped_topk);
#endif
}
csrc/ops.h
@@ -321,17 +321,19 @@ void dynamic_per_token_scaled_fp8_quant(
torch::Tensor& out, torch::Tensor const& input, torch::Tensor& scale,
std::optional<torch::Tensor> const& scale_ub);

void selective_scan_fwd(const torch::Tensor& u, const torch::Tensor& delta,
const torch::Tensor& A, const torch::Tensor& B,
const torch::Tensor& C,
const std::optional<torch::Tensor>& D_,
const std::optional<torch::Tensor>& z_,
const std::optional<torch::Tensor>& delta_bias_,
bool delta_softplus,
const std::optional<torch::Tensor>& query_start_loc,
const std::optional<torch::Tensor>& cache_indices,
const std::optional<torch::Tensor>& has_initial_state,
const torch::Tensor& ssm_states, int64_t pad_slot_id);
void selective_scan_fwd(
const torch::Tensor& u, const torch::Tensor& delta, const torch::Tensor& A,
const torch::Tensor& B, const torch::Tensor& C,
const std::optional<torch::Tensor>& D_,
const std::optional<torch::Tensor>& z_,
const std::optional<torch::Tensor>& delta_bias_, bool delta_softplus,
const std::optional<torch::Tensor>& query_start_loc,
const std::optional<torch::Tensor>& cache_indices,
const std::optional<torch::Tensor>& has_initial_state,
const torch::Tensor& ssm_states, int64_t pad_slot_id, int64_t block_size,
const std::optional<torch::Tensor>& block_idx_first_scheduled_token,
const std::optional<torch::Tensor>& block_idx_last_scheduled_token,
const std::optional<torch::Tensor>& initial_state_idx);

torch::Tensor dynamic_4bit_int_moe_cpu(
torch::Tensor x, torch::Tensor topk_ids, torch::Tensor topk_weights,
@ -578,11 +578,13 @@ void persistent_masked_m_silu_mul_quant(
|
||||
|
||||
// This kernel currently only supports H % 128 == 0 and assumes a
|
||||
// fixed GROUP_SIZE of 128.
|
||||
static constexpr int GROUP_SIZE = 128;
|
||||
|
||||
TORCH_CHECK(input.dtype() == torch::kBFloat16);
|
||||
TORCH_CHECK(y_q.dtype() == torch::kFloat8_e4m3fn ||
|
||||
y_q.dtype() == torch::kFloat8_e4m3fnuz);
|
||||
TORCH_CHECK(y_s.dtype() == torch::kFloat32);
|
||||
TORCH_CHECK(input.size(-1) % 256 == 0);
|
||||
TORCH_CHECK(input.size(-1) % (GROUP_SIZE * 2) == 0);
|
||||
|
||||
using Idx_t = int64_t;
|
||||
|
||||
@ -601,8 +603,6 @@ void persistent_masked_m_silu_mul_quant(
|
||||
|
||||
Idx_t stride_counts_e = tokens_per_expert.stride(0);
|
||||
|
||||
static constexpr int GROUP_SIZE = 128;
|
||||
|
||||
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
||||
|
||||
#define KERNEL(BLOCK_COUNT, USE_UE8M0, THREAD_COUNT, STAGES) \
|
||||
@ -628,21 +628,26 @@ void persistent_masked_m_silu_mul_quant(
|
||||
|
||||
static constexpr int SILU_V2_BLOCK_COUNT = 132 * 32;
|
||||
|
||||
int const NUM_GROUPS = H / GROUP_SIZE;
|
||||
if (!use_ue8m0) {
|
||||
if (H >= 4096) {
|
||||
if (H >= 4096 && (NUM_GROUPS % 8 == 0)) {
|
||||
/* 8 warps config */
|
||||
static constexpr int NUM_STAGES = 4;
|
||||
static constexpr int THREAD_COUNT = 256;
|
||||
KERNEL(SILU_V2_BLOCK_COUNT, false, THREAD_COUNT, NUM_STAGES);
|
||||
} else {
|
||||
/* 1 warp config */
|
||||
static constexpr int THREAD_COUNT = 32;
|
||||
KERNEL(SILU_V2_BLOCK_COUNT, false, THREAD_COUNT, 2);
|
||||
}
|
||||
} else {
|
||||
if (H >= 4096) {
|
||||
if (H >= 4096 && (NUM_GROUPS % 8 == 0)) {
|
||||
/* 8 warps config */
|
||||
static constexpr int NUM_STAGES = 4;
|
||||
static constexpr int THREAD_COUNT = 256;
|
||||
KERNEL(SILU_V2_BLOCK_COUNT, true, THREAD_COUNT, NUM_STAGES);
|
||||
} else {
|
||||
/* 1 warp config */
|
||||
static constexpr int THREAD_COUNT = 32;
|
||||
KERNEL(SILU_V2_BLOCK_COUNT, true, THREAD_COUNT, 2);
|
||||
}
|
||||
|
||||
@ -31,6 +31,13 @@
|
||||
|
||||
namespace vllm {
|
||||
|
||||
template <typename Int>
|
||||
__host__ __device__ inline Int round_up(Int x, Int y) {
|
||||
static_assert(std::is_integral_v<Int>,
|
||||
"round_up argument must be integral type");
|
||||
return (x + y - 1) / y * y;
|
||||
}
|
||||
|
||||
// Use UE4M3 by default.
|
||||
template <class Type, bool UE8M0_SF = false>
|
||||
__global__ void __launch_bounds__(512, VLLM_BLOCKS_PER_SM(512))
|
||||
@ -42,10 +49,21 @@ __global__ void __launch_bounds__(512, VLLM_BLOCKS_PER_SM(512))
|
||||
static_assert(sizeof(PackedVec) == sizeof(Type) * CVT_FP4_ELTS_PER_THREAD,
|
||||
"Vec size is not matched.");
|
||||
|
||||
int sf_m = round_up<int>(numRows, 128);
|
||||
int sf_n_unpadded = numCols / CVT_FP4_SF_VEC_SIZE;
|
||||
int sf_n_int = round_up<int>(sf_n_unpadded, 4) / 4;
|
||||
for (int row = numRows + blockIdx.x; row < sf_m; row += gridDim.x) {
|
||||
// Each thread writes 4 uint32_t elements.
|
||||
for (int col = sf_n_unpadded + threadIdx.x * 4; col < sf_n_int;
|
||||
col += blockDim.x * 4) {
|
||||
SFout[row * sf_n_int + col] = 0x00;
|
||||
}
|
||||
}
|
||||
|
||||
// Get the global scaling factor, which will be applied to the SF.
|
||||
// Note SFScale is the same as next GEMM's alpha, which is
|
||||
// (448.f / (Alpha_A / 6.f)).
|
||||
float const SFScaleVal = SFScale == nullptr ? 1.0f : SFScale[0];
|
||||
float const global_scale = SFScale == nullptr ? 1.0f : SFScale[0];
|
||||
|
||||
// Input tensor row/col loops.
|
||||
for (int rowIdx = blockIdx.x; rowIdx < numRows; rowIdx += gridDim.x) {
|
||||
@ -64,7 +82,7 @@ __global__ void __launch_bounds__(512, VLLM_BLOCKS_PER_SM(512))
|
||||
rowIdx, colIdx, numCols, SFout);
|
||||
|
||||
out_pos =
|
||||
cvt_warp_fp16_to_fp4<Type, UE8M0_SF>(in_vec, SFScaleVal, sf_out);
|
||||
cvt_warp_fp16_to_fp4<Type, UE8M0_SF>(in_vec, global_scale, sf_out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
#include "scaled_mm_kernels.hpp"
|
||||
#include "scaled_mm_sm100_fp8_dispatch.cuh"
|
||||
#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp"
|
||||
|
||||
namespace vllm {
|
||||
|
||||
@ -13,11 +12,11 @@ void cutlass_scaled_mm_sm100_fp8(torch::Tensor& out, torch::Tensor const& a,
|
||||
if (bias) {
|
||||
TORCH_CHECK(bias->dtype() == out.dtype(),
|
||||
"currently bias dtype must match output dtype ", out.dtype());
|
||||
return cutlass_scaled_mm_sm100_fp8_epilogue<c3x::ScaledEpilogueBias>(
|
||||
out, a, b, a_scales, b_scales, *bias);
|
||||
return cutlass_scaled_mm_sm100_fp8_epilogue<true>(out, a, b, a_scales,
|
||||
b_scales, *bias);
|
||||
} else {
|
||||
return cutlass_scaled_mm_sm100_fp8_epilogue<c3x::ScaledEpilogue>(
|
||||
out, a, b, a_scales, b_scales);
|
||||
return cutlass_scaled_mm_sm100_fp8_epilogue<false>(out, a, b, a_scales,
|
||||
b_scales);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -2,6 +2,7 @@
|
||||
|
||||
#include "scaled_mm.cuh"
|
||||
#include "cutlass_gemm_caller.cuh"
|
||||
#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp"
|
||||
|
||||
/**
|
||||
* This file defines Gemm kernel configurations for SM100 (fp8) based on the
|
||||
@ -12,8 +13,88 @@ namespace vllm {
|
||||
|
||||
using c3x::cutlass_gemm_caller;
|
||||
|
||||
template <typename InType, typename OutType,
|
||||
template <typename, typename, typename> typename Epilogue>
|
||||
template <typename ElementAB_, typename ElementD_,
|
||||
template <typename, typename, typename> typename Epilogue_,
|
||||
typename TileShape, typename ClusterShape, typename KernelSchedule,
|
||||
typename EpilogueSchedule, bool swap_ab_ = false>
|
||||
struct cutlass_3x_gemm_sm100_fp8 {
|
||||
using ElementAB = ElementAB_;
|
||||
using ElementC = ElementD_;
|
||||
using ElementD = ElementD_;
|
||||
using ElementAcc =
|
||||
typename std::conditional<std::is_same_v<ElementAB, int8_t>, int32_t,
|
||||
float>::type;
|
||||
|
||||
using Epilogue = Epilogue_<ElementAcc, ElementD, TileShape>;
|
||||
|
||||
using EVTCompute = typename Epilogue::EVTCompute;
|
||||
|
||||
static constexpr int AlignmentAB =
|
||||
128 / cutlass::sizeof_bits<ElementAB>::value;
|
||||
static constexpr int AlignmentCD =
|
||||
128 / cutlass::sizeof_bits<ElementD>::value;
|
||||
|
||||
// Compile-time swap_ab flag
|
||||
static constexpr bool swap_ab = swap_ab_;
|
||||
|
||||
// -----------------------------------------------------------
|
||||
// Layout definitions
|
||||
// -----------------------------------------------------------
|
||||
using LayoutA = cutlass::layout::RowMajor;
|
||||
using LayoutA_T = typename cutlass::layout::LayoutTranspose<LayoutA>::type;
|
||||
|
||||
using LayoutB = cutlass::layout::ColumnMajor;
|
||||
using LayoutB_T = typename cutlass::layout::LayoutTranspose<LayoutB>::type;
|
||||
|
||||
using LayoutD = cutlass::layout::RowMajor;
|
||||
using LayoutD_Transpose =
|
||||
typename cutlass::layout::LayoutTranspose<LayoutD>::type;
|
||||
|
||||
using LayoutC = LayoutD;
|
||||
using LayoutC_Transpose = LayoutD_Transpose;
|
||||
|
||||
// -----------------------------------------------------------
|
||||
// Collective epilogue (conditionally swap operands and layouts)
|
||||
// -----------------------------------------------------------
|
||||
using CollectiveEpilogue =
|
||||
typename cutlass::epilogue::collective::CollectiveBuilder<
|
||||
cutlass::arch::Sm100, cutlass::arch::OpClassTensorOp, TileShape,
|
||||
ClusterShape, cutlass::epilogue::collective::EpilogueTileAuto,
|
||||
ElementAcc, float, ElementC,
|
||||
conditional_t<swap_ab, LayoutC_Transpose, LayoutC>, AlignmentCD,
|
||||
ElementD, conditional_t<swap_ab, LayoutD_Transpose, LayoutD>,
|
||||
AlignmentCD, EpilogueSchedule, EVTCompute>::CollectiveOp;
|
||||
|
||||
static constexpr size_t CEStorageSize =
|
||||
sizeof(typename CollectiveEpilogue::SharedStorage);
|
||||
|
||||
using Stages = typename cutlass::gemm::collective::StageCountAutoCarveout<
|
||||
static_cast<int>(CEStorageSize)>;
|
||||
|
||||
// -----------------------------------------------------------
|
||||
// Collective mainloop (conditionally swap operands and layouts)
|
||||
// -----------------------------------------------------------
|
||||
using CollectiveMainloop = conditional_t<
|
||||
swap_ab,
|
||||
typename cutlass::gemm::collective::CollectiveBuilder<
|
||||
cutlass::arch::Sm100, cutlass::arch::OpClassTensorOp, ElementAB,
|
||||
LayoutB_T, AlignmentAB, // Swapped B (as A)
|
||||
ElementAB, LayoutA_T, AlignmentAB, // Swapped A (as B)
|
||||
ElementAcc, TileShape, ClusterShape, Stages,
|
||||
KernelSchedule>::CollectiveOp,
|
||||
typename cutlass::gemm::collective::CollectiveBuilder<
|
||||
cutlass::arch::Sm100, cutlass::arch::OpClassTensorOp, ElementAB,
|
||||
LayoutA, AlignmentAB, ElementAB, LayoutB, AlignmentAB, ElementAcc,
|
||||
TileShape, ClusterShape, Stages, KernelSchedule>::CollectiveOp>;
|
||||
|
||||
// -----------------------------------------------------------
|
||||
// Kernel definition
|
||||
// -----------------------------------------------------------
|
||||
using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
|
||||
Shape<int, int, int, int>, CollectiveMainloop, CollectiveEpilogue, void>;
|
||||
};
|
||||
|
||||
template <typename InType, typename OutType, bool EnableBias>
|
||||
struct sm100_fp8_config_default {
|
||||
// M in (256, inf)
|
||||
static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
|
||||
@ -22,12 +103,16 @@ struct sm100_fp8_config_default {
|
||||
using TileShape = Shape<_256, _128, _128>;
|
||||
using ClusterShape = Shape<_2, _2, _1>;
|
||||
using Cutlass3xGemm =
|
||||
cutlass_3x_gemm_sm100<InType, OutType, Epilogue, TileShape, ClusterShape,
|
||||
KernelSchedule, EpilogueSchedule>;
|
||||
conditional_t<EnableBias,
|
||||
cutlass_3x_gemm_sm100_fp8<
|
||||
InType, OutType, c3x::ScaledEpilogueBias, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule>,
|
||||
cutlass_3x_gemm_sm100_fp8<
|
||||
InType, OutType, c3x::ScaledEpilogue, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule>>;
|
||||
};
|
||||
|
||||
template <typename InType, typename OutType,
|
||||
template <typename, typename, typename> typename Epilogue>
|
||||
template <typename InType, typename OutType, bool EnableBias>
|
||||
struct sm100_fp8_config_M256 {
|
||||
// M in (64, 256]
|
||||
static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
|
||||
@ -36,44 +121,127 @@ struct sm100_fp8_config_M256 {
|
||||
using TileShape = Shape<_128, _128, _128>;
|
||||
using ClusterShape = Shape<_2, _1, _1>;
|
||||
using Cutlass3xGemm =
|
||||
cutlass_3x_gemm_sm100<InType, OutType, Epilogue, TileShape, ClusterShape,
|
||||
KernelSchedule, EpilogueSchedule>;
|
||||
conditional_t<EnableBias,
|
||||
cutlass_3x_gemm_sm100_fp8<
|
||||
InType, OutType, c3x::ScaledEpilogueBias, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule>,
|
||||
cutlass_3x_gemm_sm100_fp8<
|
||||
InType, OutType, c3x::ScaledEpilogue, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule>>;
|
||||
};
|
||||
|
||||
template <typename InType, typename OutType,
|
||||
template <typename, typename, typename> typename Epilogue>
|
||||
template <typename InType, typename OutType, bool EnableBias>
|
||||
struct sm100_fp8_config_M64_swap_ab {
|
||||
// This config is for M in (16, 64] and K >= 4096
|
||||
static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
|
||||
using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto;
|
||||
using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto;
|
||||
using TileShape = Shape<_128, _64, _256>;
|
||||
using ClusterShape = Shape<_4, _1, _1>;
|
||||
|
||||
// Use ScaledEpilogueColumnBias instead of ScaledEpilogueBias when doing swap
|
||||
// AB
|
||||
using Cutlass3xGemm = conditional_t<
|
||||
EnableBias,
|
||||
cutlass_3x_gemm_sm100_fp8<InType, OutType, c3x::ScaledEpilogueColumnBias,
|
||||
TileShape, ClusterShape, KernelSchedule,
|
||||
EpilogueSchedule, true>,
|
||||
cutlass_3x_gemm_sm100_fp8<InType, OutType, c3x::ScaledEpilogue, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule,
|
||||
true>>;
|
||||
};
|
||||
|
||||
template <typename InType, typename OutType, bool EnableBias>
|
||||
struct sm100_fp8_config_M64 {
|
||||
// M in (16, 64]
|
||||
// This config is for M = 64 and K < 4096 (do not enable swap AB in such case)
|
||||
static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
|
||||
using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto;
|
||||
using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto;
|
||||
using TileShape = Shape<_64, _64, _128>;
|
||||
using ClusterShape = Shape<_1, _1, _1>;
|
||||
|
||||
using Cutlass3xGemm =
|
||||
cutlass_3x_gemm_sm100<InType, OutType, Epilogue, TileShape, ClusterShape,
|
||||
KernelSchedule, EpilogueSchedule>;
|
||||
conditional_t<EnableBias,
|
||||
cutlass_3x_gemm_sm100_fp8<
|
||||
InType, OutType, c3x::ScaledEpilogueBias, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule>,
|
||||
cutlass_3x_gemm_sm100_fp8<
|
||||
InType, OutType, c3x::ScaledEpilogue, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule>>;
|
||||
};
|
||||
|
||||
template <typename InType, typename OutType,
|
||||
template <typename, typename, typename> typename Epilogue>
|
||||
struct sm100_fp8_config_M16 {
|
||||
template <typename InType, typename OutType, bool EnableBias>
|
||||
struct sm100_fp8_config_M16_swap_ab {
|
||||
// M in [1, 16]
|
||||
static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
|
||||
using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto;
|
||||
using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto;
|
||||
using TileShape = Shape<_64, _64, _128>;
|
||||
using ClusterShape = Shape<_1, _4, _1>;
|
||||
using Cutlass3xGemm =
|
||||
cutlass_3x_gemm_sm100<InType, OutType, Epilogue, TileShape, ClusterShape,
|
||||
KernelSchedule, EpilogueSchedule>;
|
||||
using TileShape = Shape<_128, _32, _128>;
|
||||
using ClusterShape = Shape<_4, _1, _1>;
|
||||
|
||||
// Use ScaledEpilogueColumnBias instead of ScaledEpilogueBias when doing swap
|
||||
// AB
|
||||
using Cutlass3xGemm = conditional_t<
|
||||
EnableBias,
|
||||
cutlass_3x_gemm_sm100_fp8<InType, OutType, c3x::ScaledEpilogueColumnBias,
|
||||
TileShape, ClusterShape, KernelSchedule,
|
||||
EpilogueSchedule, true>,
|
||||
cutlass_3x_gemm_sm100_fp8<InType, OutType, c3x::ScaledEpilogue, TileShape,
|
||||
ClusterShape, KernelSchedule, EpilogueSchedule,
|
||||
true>>;
|
||||
};
|
||||
|
||||
template <typename InType, typename OutType,
|
||||
template <typename, typename, typename> typename Epilogue,
|
||||
template <typename Gemm, typename... EpilogueArgs>
|
||||
void cutlass_gemm_caller_sm100_fp8(torch::Tensor& out, torch::Tensor const& a,
|
||||
torch::Tensor const& b,
|
||||
EpilogueArgs&&... epilogue_params) {
|
||||
static constexpr bool swap_ab = Gemm::swap_ab;
|
||||
using ElementAB = typename Gemm::ElementAB;
|
||||
using ElementD = typename Gemm::ElementD;
|
||||
using GemmKernel = typename Gemm::GemmKernel;
|
||||
|
||||
using StrideA = typename Gemm::GemmKernel::StrideA;
|
||||
using StrideB = typename Gemm::GemmKernel::StrideB;
|
||||
using StrideC = typename Gemm::GemmKernel::StrideC;
|
||||
|
||||
int32_t m = a.size(0), n = b.size(1), k = a.size(1);
|
||||
auto prob_shape =
|
||||
swap_ab ? cute::make_shape(n, m, k, 1) : cute::make_shape(m, n, k, 1);
|
||||
|
||||
StrideA a_stride =
|
||||
cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(m, k, 1));
|
||||
StrideB b_stride =
|
||||
cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(n, k, 1));
|
||||
StrideC c_stride = cutlass::make_cute_packed_stride(
|
||||
StrideC{},
|
||||
swap_ab ? cute::make_shape(n, m, 1) : cute::make_shape(m, n, 1));
|
||||
|
||||
auto a_ptr = static_cast<ElementAB*>(a.data_ptr());
|
||||
auto b_ptr = static_cast<ElementAB*>(b.data_ptr());
|
||||
auto c_ptr = static_cast<ElementD*>(out.data_ptr());
|
||||
|
||||
typename GemmKernel::MainloopArguments mainloop_args =
|
||||
swap_ab ? typename GemmKernel::MainloopArguments{b_ptr, b_stride, a_ptr,
|
||||
a_stride}
|
||||
: typename GemmKernel::MainloopArguments{a_ptr, a_stride, b_ptr,
|
||||
b_stride};
|
||||
|
||||
typename GemmKernel::EpilogueArguments epilogue_args{
|
||||
Gemm::Epilogue::prepare_args(
|
||||
std::forward<EpilogueArgs>(epilogue_params)...),
|
||||
c_ptr, c_stride, c_ptr, c_stride};
|
||||
|
||||
c3x::cutlass_gemm_caller<GemmKernel>(a.device(), prob_shape, mainloop_args,
|
||||
epilogue_args);
|
||||
}
|
||||
|
||||
template <typename InType, typename OutType, bool EnableBias,
|
||||
typename... EpilogueArgs>
|
||||
inline void cutlass_gemm_sm100_fp8_dispatch(torch::Tensor& out,
|
||||
torch::Tensor const& a,
|
||||
torch::Tensor const& b,
|
||||
torch::Tensor const& a_scales,
|
||||
torch::Tensor const& b_scales,
|
||||
EpilogueArgs&&... args) {
|
||||
static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
|
||||
TORCH_CHECK(a.dtype() == torch::kFloat8_e4m3fn);
|
||||
@ -81,55 +249,69 @@ inline void cutlass_gemm_sm100_fp8_dispatch(torch::Tensor& out,
|
||||
|
||||
using Cutlass3xGemmDefault =
|
||||
typename sm100_fp8_config_default<InType, OutType,
|
||||
Epilogue>::Cutlass3xGemm;
|
||||
using Cutlass3xGemmM16 =
|
||||
typename sm100_fp8_config_M16<InType, OutType, Epilogue>::Cutlass3xGemm;
|
||||
EnableBias>::Cutlass3xGemm;
|
||||
using Cutlass3xGemmM16SwapAB =
|
||||
typename sm100_fp8_config_M16_swap_ab<InType, OutType,
|
||||
EnableBias>::Cutlass3xGemm;
|
||||
using Cutlass3xGemmM64SwapAB =
|
||||
typename sm100_fp8_config_M64_swap_ab<InType, OutType,
|
||||
EnableBias>::Cutlass3xGemm;
|
||||
using Cutlass3xGemmM64 =
|
||||
typename sm100_fp8_config_M64<InType, OutType, Epilogue>::Cutlass3xGemm;
|
||||
typename sm100_fp8_config_M64<InType, OutType, EnableBias>::Cutlass3xGemm;
|
||||
|
||||
using Cutlass3xGemmM256 =
|
||||
typename sm100_fp8_config_M256<InType, OutType, Epilogue>::Cutlass3xGemm;
|
||||
typename sm100_fp8_config_M256<InType, OutType,
|
||||
EnableBias>::Cutlass3xGemm;
|
||||
|
||||
uint32_t const m = a.size(0);
|
||||
uint32_t const mp2 =
|
||||
std::max(static_cast<uint32_t>(16), next_pow_2(m)); // next power of 2
|
||||
uint32_t const k = a.size(1);
|
||||
|
||||
if (mp2 <= 16) {
|
||||
if (m <= 16) {
|
||||
// m in [1, 16]
|
||||
return cutlass_gemm_caller<Cutlass3xGemmM16>(
|
||||
out, a, b, std::forward<EpilogueArgs>(args)...);
|
||||
} else if (mp2 <= 64) {
|
||||
return cutlass_gemm_caller_sm100_fp8<Cutlass3xGemmM16SwapAB>(
|
||||
out, a, b, b_scales, a_scales, std::forward<EpilogueArgs>(args)...);
|
||||
} else if (m <= 64) {
|
||||
// m in (16, 64]
|
||||
return cutlass_gemm_caller<Cutlass3xGemmM64>(
|
||||
out, a, b, std::forward<EpilogueArgs>(args)...);
|
||||
} else if (mp2 <= 256) {
|
||||
if (m == 64 && k < 4096) {
|
||||
// do not enable swap AB
|
||||
return cutlass_gemm_caller_sm100_fp8<Cutlass3xGemmM64>(
|
||||
out, a, b, a_scales, b_scales, std::forward<EpilogueArgs>(args)...);
|
||||
}
|
||||
return cutlass_gemm_caller_sm100_fp8<Cutlass3xGemmM64SwapAB>(
|
||||
out, a, b, b_scales, a_scales, std::forward<EpilogueArgs>(args)...);
|
||||
|
||||
} else if (m <= 256) {
|
||||
// m in (64, 256]
|
||||
return cutlass_gemm_caller<Cutlass3xGemmM256>(
|
||||
out, a, b, std::forward<EpilogueArgs>(args)...);
|
||||
return cutlass_gemm_caller_sm100_fp8<Cutlass3xGemmM256>(
|
||||
out, a, b, a_scales, b_scales, std::forward<EpilogueArgs>(args)...);
|
||||
} else {
|
||||
// m in (256, inf)
|
||||
return cutlass_gemm_caller<Cutlass3xGemmDefault>(
|
||||
out, a, b, std::forward<EpilogueArgs>(args)...);
|
||||
return cutlass_gemm_caller_sm100_fp8<Cutlass3xGemmDefault>(
|
||||
out, a, b, a_scales, b_scales, std::forward<EpilogueArgs>(args)...);
|
||||
}
|
||||
}
|
||||
|
||||
template <template <typename, typename, typename> typename Epilogue,
|
||||
typename... EpilogueArgs>
|
||||
template <bool EnableBias, typename... EpilogueArgs>
|
||||
void cutlass_scaled_mm_sm100_fp8_epilogue(torch::Tensor& out,
|
||||
torch::Tensor const& a,
|
||||
torch::Tensor const& b,
|
||||
torch::Tensor const& a_scales,
|
||||
torch::Tensor const& b_scales,
|
||||
EpilogueArgs&&... epilogue_args) {
|
||||
TORCH_CHECK(a.dtype() == torch::kFloat8_e4m3fn);
|
||||
TORCH_CHECK(b.dtype() == torch::kFloat8_e4m3fn);
|
||||
|
||||
if (out.dtype() == torch::kBFloat16) {
|
||||
return cutlass_gemm_sm100_fp8_dispatch<cutlass::float_e4m3_t,
|
||||
cutlass::bfloat16_t, Epilogue>(
|
||||
out, a, b, std::forward<EpilogueArgs>(epilogue_args)...);
|
||||
cutlass::bfloat16_t, EnableBias>(
|
||||
out, a, b, a_scales, b_scales,
|
||||
std::forward<EpilogueArgs>(epilogue_args)...);
|
||||
} else {
|
||||
TORCH_CHECK(out.dtype() == torch::kFloat16);
|
||||
return cutlass_gemm_sm100_fp8_dispatch<cutlass::float_e4m3_t,
|
||||
cutlass::half_t, Epilogue>(
|
||||
out, a, b, std::forward<EpilogueArgs>(epilogue_args)...);
|
||||
cutlass::half_t, EnableBias>(
|
||||
out, a, b, a_scales, b_scales,
|
||||
std::forward<EpilogueArgs>(epilogue_args)...);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
#include <ATen/cuda/CUDAContext.h>
|
||||
#include <torch/all.h>
|
||||
#include <c10/cuda/CUDAGuard.h>
|
||||
|
||||
#include <cmath>
|
||||
|
||||
@ -275,6 +276,7 @@ void static_scaled_int8_quant(torch::Tensor& out, // [..., hidden_size]
|
||||
int const num_tokens = input.numel() / hidden_size;
|
||||
dim3 const grid(num_tokens);
|
||||
dim3 const block(std::min(hidden_size, 256));
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(input));
|
||||
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
||||
VLLM_DISPATCH_FLOATING_TYPES(
|
||||
input.scalar_type(), "static_scaled_int8_quant_kernel", [&] {
|
||||
@ -306,6 +308,7 @@ void dynamic_scaled_int8_quant(
|
||||
int const num_tokens = input.numel() / hidden_size;
|
||||
dim3 const grid(num_tokens);
|
||||
dim3 const block(std::min(hidden_size, 256));
|
||||
const at::cuda::OptionalCUDAGuard device_guard(device_of(input));
|
||||
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
||||
VLLM_DISPATCH_FLOATING_TYPES(
|
||||
input.scalar_type(), "dynamic_scaled_int8_quant_kernel", [&] {
|
||||
|
||||
@ -611,7 +611,11 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
|
||||
"Tensor? cache_indices,"
|
||||
"Tensor? has_initial_state,"
|
||||
"Tensor! ssm_states,"
|
||||
"int pad_slot_id) -> ()");
|
||||
"int pad_slot_id,"
|
||||
"int block_size,"
|
||||
"Tensor? block_idx_first_scheduled_token,"
|
||||
"Tensor? block_idx_last_scheduled_token,"
|
||||
"Tensor? initial_state_idx) -> ()");
|
||||
ops.impl("selective_scan_fwd", torch::kCUDA, &selective_scan_fwd);
|
||||
|
||||
// Hadamard transforms
|
||||
|
||||
@ -132,9 +132,7 @@ WORKDIR /workspace
|
||||
COPY requirements/common.txt requirements/common.txt
|
||||
COPY requirements/cuda.txt requirements/cuda.txt
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
# TODO: remove apache-tvm-ffi once FlashInfer is fixed https://github.com/flashinfer-ai/flashinfer/issues/1962
|
||||
uv pip install --python /opt/venv/bin/python3 --pre apache-tvm-ffi==0.1.0b15 \
|
||||
&& uv pip install --python /opt/venv/bin/python3 -r requirements/cuda.txt \
|
||||
uv pip install --python /opt/venv/bin/python3 -r requirements/cuda.txt \
|
||||
--extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
|
||||
|
||||
# cuda arch list used by torch
|
||||
@ -356,16 +354,14 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
# Install vllm wheel first, so that torch etc will be installed.
|
||||
RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
|
||||
--mount=type=cache,target=/root/.cache/uv \
|
||||
# TODO: remove apache-tvm-ffi once FlashInfer is fixed https://github.com/flashinfer-ai/flashinfer/issues/1962
|
||||
uv pip install --system --pre apache-tvm-ffi==0.1.0b15 \
|
||||
&& uv pip install --system dist/*.whl --verbose \
|
||||
uv pip install --system dist/*.whl --verbose \
|
||||
--extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
|
||||
|
||||
# Install FlashInfer pre-compiled kernel cache and binaries
|
||||
# https://docs.flashinfer.ai/installation.html
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv pip install --system flashinfer-cubin==0.4.1 \
|
||||
&& uv pip install --system flashinfer-jit-cache==0.4.1 \
|
||||
uv pip install --system flashinfer-cubin==0.5.2 \
|
||||
&& uv pip install --system flashinfer-jit-cache==0.5.2 \
|
||||
--extra-index-url https://flashinfer.ai/whl/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
|
||||
&& flashinfer show-config
|
||||
|
||||
|
||||
@ -246,7 +246,7 @@ RUN pip install setuptools==75.6.0 packaging==23.2 ninja==1.11.1.3 build==1.2.2.
|
||||
|
||||
|
||||
# build flashinfer for torch nightly from source around 10 mins
|
||||
# release version: v0.4.1
|
||||
# release version: v0.5.2
|
||||
# todo(elainewy): cache flashinfer build result for faster build
|
||||
ENV CCACHE_DIR=/root/.cache/ccache
|
||||
RUN --mount=type=cache,target=/root/.cache/ccache \
|
||||
@ -254,7 +254,7 @@ RUN --mount=type=cache,target=/root/.cache/ccache \
|
||||
echo "git clone flashinfer..." \
|
||||
&& git clone --recursive https://github.com/flashinfer-ai/flashinfer.git \
|
||||
&& cd flashinfer \
|
||||
&& git checkout v0.4.1\
|
||||
&& git checkout v0.5.2 \
|
||||
&& git submodule update --init --recursive \
|
||||
&& echo "finish git clone flashinfer..." \
|
||||
&& rm -rf build \
|
||||
|
||||
@ -75,7 +75,6 @@ COPY --from=build_vllm ${COMMON_WORKDIR}/vllm /vllm-workspace
|
||||
RUN cd /vllm-workspace \
|
||||
&& rm -rf vllm \
|
||||
&& python3 -m pip install -e tests/vllm_test_utils \
|
||||
&& python3 -m pip install lm-eval[api]==0.4.4 \
|
||||
&& python3 -m pip install pytest-shard
|
||||
|
||||
# -----------------------
|
||||
|
||||
@ -14,7 +14,7 @@ ENV LANG=C.UTF-8 \
|
||||
|
||||
# Install development utilities
|
||||
RUN microdnf install -y \
|
||||
which procps findutils tar vim git gcc gcc-gfortran g++ make patch zlib-devel \
|
||||
which procps findutils tar vim git gcc-toolset-14 gcc-toolset-14-libatomic-devel patch zlib-devel \
|
||||
libjpeg-turbo-devel libtiff-devel libpng-devel libwebp-devel freetype-devel harfbuzz-devel \
|
||||
openssl-devel openblas openblas-devel autoconf automake libtool cmake numpy libsndfile \
|
||||
clang llvm-devel llvm-static clang-devel && \
|
||||
@ -85,40 +85,15 @@ RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \
|
||||
rustup default stable && \
|
||||
rustup show
|
||||
|
||||
FROM python-install AS torch
|
||||
ARG TORCH_VERSION=2.7.0
|
||||
ENV export _GLIBCXX_USE_CXX11_ABI=1
|
||||
ENV CARGO_HOME=/root/.cargo
|
||||
ENV RUSTUP_HOME=/root/.rustup
|
||||
ENV PATH="$CARGO_HOME/bin:$RUSTUP_HOME/bin:$PATH"
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,from=rust,source=/root/.cargo,target=/root/.cargo,rw \
|
||||
--mount=type=bind,from=rust,source=/root/.rustup,target=/root/.rustup,rw \
|
||||
git clone https://github.com/pytorch/pytorch.git && \
|
||||
cd pytorch && \
|
||||
git checkout v2.7.0 && \
|
||||
git submodule sync && \
|
||||
git submodule update --init --recursive && \
|
||||
uv pip install cmake ninja && \
|
||||
uv pip install -r requirements.txt && \
|
||||
python setup.py bdist_wheel
|
||||
|
||||
|
||||
FROM python-install AS torch-vision
|
||||
# Install torchvision
|
||||
ARG TORCH_VERSION=2.7.0
|
||||
ARG TORCH_VISION_VERSION=v0.20.1
|
||||
ARG TORCH_VISION_VERSION=v0.23.0
|
||||
WORKDIR /tmp
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,from=torch,source=/tmp/pytorch/dist,target=/tmp/torch-wheels/ \
|
||||
git clone https://github.com/pytorch/vision.git && \
|
||||
cd vision && \
|
||||
git checkout $TORCH_VISION_VERSION && \
|
||||
TORCH_WHL_FILE=$(ls /tmp/torch-wheels/*.whl | head -n 1) && \
|
||||
uv pip install -v $TORCH_WHL_FILE && \
|
||||
uv pip install torch==2.8.0 --index-url https://download.pytorch.org/whl/cpu && \
|
||||
python setup.py bdist_wheel
|
||||
|
||||
FROM python-install AS hf-xet-builder
|
||||
@ -199,26 +174,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
if ! grep '#include "dynamic_annotations.h"' numba/_dispatcher.cpp; then \
|
||||
sed -i '/#include "internal\/pycore_atomic.h"/i\#include "dynamic_annotations.h"' numba/_dispatcher.cpp; \
|
||||
fi && python setup.py bdist_wheel
|
||||
|
||||
# Edit aws-lc-sys to support s390x
|
||||
FROM python-install AS aws-lc-sys-editor
|
||||
WORKDIR /tmp
|
||||
ENV CARGO_HOME=/root/.cargo
|
||||
ENV RUSTUP_HOME=/root/.rustup
|
||||
ENV PATH="$CARGO_HOME/bin:$RUSTUP_HOME/bin:$PATH"
|
||||
ARG AWS_LC_VERSION=v0.30.0
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,from=rust,source=/root/.cargo,target=/root/.cargo,rw \
|
||||
--mount=type=bind,from=rust,source=/root/.rustup,target=/root/.rustup,rw \
|
||||
git clone --recursive https://github.com/aws/aws-lc-rs.git && \
|
||||
cd aws-lc-rs && \
|
||||
git checkout tags/aws-lc-sys/${AWS_LC_VERSION} && \
|
||||
git submodule sync && \
|
||||
git submodule update --init --recursive && \
|
||||
cd aws-lc-sys && \
|
||||
sed -i '682 s/strncmp(buf, "-----END ", 9)/memcmp(buf, "-----END ", 9)/' aws-lc/crypto/pem/pem_lib.c && \
|
||||
sed -i '712 s/strncmp(buf, "-----END ", 9)/memcmp(buf, "-----END ", 9)/' aws-lc/crypto/pem/pem_lib.c && \
|
||||
sed -i '747 s/strncmp(buf, "-----END ", 9)/memcmp(buf, "-----END ", 9)/' aws-lc/crypto/pem/pem_lib.c
|
||||
|
||||
# Build Outlines Core
|
||||
FROM python-install AS outlines-core-builder
|
||||
@ -226,17 +181,17 @@ WORKDIR /tmp
|
||||
ENV CARGO_HOME=/root/.cargo
|
||||
ENV RUSTUP_HOME=/root/.rustup
|
||||
ENV PATH="$CARGO_HOME/bin:$RUSTUP_HOME/bin:$PATH"
|
||||
ARG OUTLINES_CORE_VERSION=0.2.10
|
||||
COPY requirements/common.txt /tmp/requirements/common.txt
|
||||
ARG OUTLINES_CORE_VERSION
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,from=rust,source=/root/.cargo,target=/root/.cargo,rw \
|
||||
--mount=type=bind,from=rust,source=/root/.rustup,target=/root/.rustup,rw \
|
||||
--mount=type=bind,from=aws-lc-sys-editor,source=/tmp/aws-lc-rs/aws-lc-sys,target=/tmp/aws-lc-sys,rw \
|
||||
OUTLINES_CORE_VERSION=${OUTLINES_CORE_VERSION:-$(grep -E '^outlines_core\s*==\s*[0-9.]+' /tmp/requirements/common.txt | grep -Eo '[0-9.]+')} && \
|
||||
if [ -z "${OUTLINES_CORE_VERSION}" ]; then echo "ERROR: Could not determine outlines_core version"; exit 1; fi && \
|
||||
git clone https://github.com/dottxt-ai/outlines-core.git && \
|
||||
cd outlines-core && \
|
||||
git checkout tags/${OUTLINES_CORE_VERSION} && \
|
||||
sed -i "s/version = \"0.0.0\"/version = \"${OUTLINES_CORE_VERSION}\"/" Cargo.toml && \
|
||||
echo '[patch.crates-io]' >> Cargo.toml && \
|
||||
echo 'aws-lc-sys = { path = "/tmp/aws-lc-sys" }' >> Cargo.toml && \
|
||||
uv pip install maturin && \
|
||||
python -m maturin build --release --out dist
|
||||
|
||||
@ -245,13 +200,15 @@ FROM python-install AS vllm-cpu
|
||||
ARG PYTHON_VERSION
|
||||
|
||||
# Set correct library path for torch and numactl
|
||||
ENV LD_LIBRARY_PATH="/opt/vllm/lib64/python${PYTHON_VERSION}/site-packages/torch/lib:/usr/local/lib:$LD_LIBRARY_PATH"
|
||||
ENV LD_LIBRARY_PATH="/opt/vllm/lib64/python${PYTHON_VERSION}/site-packages/torch/lib:/usr/local/lib:/opt/rh/gcc-toolset-14/root/usr/lib64:$LD_LIBRARY_PATH"
|
||||
ENV C_INCLUDE_PATH="/usr/local/include:$C_INCLUDE_PATH"
|
||||
ENV UV_LINK_MODE=copy
|
||||
ENV CARGO_HOME=/root/.cargo
|
||||
ENV RUSTUP_HOME=/root/.rustup
|
||||
ENV PATH="$CARGO_HOME/bin:$RUSTUP_HOME/bin:$PATH"
|
||||
ENV GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1
|
||||
ENV PCP_DIR=/opt/rh/gcc-toolset-14/root
|
||||
ENV PKG_CONFIG_PATH="/opt/rh/gcc-toolset-14/root/usr/lib64/pkgconfig:/usr/local/lib/pkgconfig/"
|
||||
ENV PATH="${VIRTUAL_ENV:+${VIRTUAL_ENV}/bin}:/opt/rh/gcc-toolset-14/root/usr/bin:/usr/local/bin:$CARGO_HOME/bin:$RUSTUP_HOME/bin:$PATH"
|
||||
|
||||
COPY . /workspace/vllm
|
||||
WORKDIR /workspace/vllm
|
||||
@ -266,7 +223,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
--mount=type=bind,from=pyarrow,source=/tmp/arrow/python/dist,target=/tmp/arrow-wheels \
|
||||
--mount=type=bind,from=torch-vision,source=/tmp/vision/dist,target=/tmp/vision-wheels/ \
|
||||
--mount=type=bind,from=hf-xet-builder,source=/tmp/hf-xet/dist,target=/tmp/hf-xet-wheels/ \
|
||||
--mount=type=bind,from=torch,source=/tmp/pytorch/dist,target=/tmp/torch-wheels/ \
|
||||
--mount=type=bind,from=numba-builder,source=/tmp/llvmlite/dist,target=/tmp/llvmlite-wheels/ \
|
||||
--mount=type=bind,from=numba-builder,source=/tmp/numba/dist,target=/tmp/numba-wheels/ \
|
||||
--mount=type=bind,from=outlines-core-builder,source=/tmp/outlines-core/dist,target=/tmp/outlines-core/dist/ \
|
||||
@ -274,7 +230,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
ARROW_WHL_FILE=$(ls /tmp/arrow-wheels/pyarrow-*.whl) && \
|
||||
VISION_WHL_FILE=$(ls /tmp/vision-wheels/*.whl) && \
|
||||
HF_XET_WHL_FILE=$(ls /tmp/hf-xet-wheels/*.whl) && \
|
||||
TORCH_WHL_FILE=$(ls /tmp/torch-wheels/*.whl) && \
|
||||
LLVM_WHL_FILE=$(ls /tmp/llvmlite-wheels/*.whl) && \
|
||||
NUMBA_WHL_FILE=$(ls /tmp/numba-wheels/*.whl) && \
|
||||
OUTLINES_CORE_WHL_FILE=$(ls /tmp/outlines-core/dist/*.whl) && \
|
||||
@ -282,7 +237,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
$ARROW_WHL_FILE \
|
||||
$VISION_WHL_FILE \
|
||||
$HF_XET_WHL_FILE \
|
||||
$TORCH_WHL_FILE \
|
||||
$LLVM_WHL_FILE \
|
||||
$NUMBA_WHL_FILE \
|
||||
$OUTLINES_CORE_WHL_FILE \
|
||||
|
||||
@ -54,7 +54,7 @@ ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
--mount=type=bind,source=.git,target=.git \
|
||||
python3 setup.py install
|
||||
pip install --no-build-isolation .
|
||||
|
||||
CMD ["/bin/bash"]
|
||||
|
||||
@ -64,9 +64,6 @@ FROM vllm-base AS vllm-openai
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip install accelerate hf_transfer pytest pytest_asyncio lm_eval[api] modelscope
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip uninstall oneccl oneccl-devel -y
|
||||
|
||||
# install development dependencies (for testing)
|
||||
RUN python3 -m pip install -e tests/vllm_test_utils
|
||||
|
||||
@ -74,4 +71,7 @@ RUN python3 -m pip install -e tests/vllm_test_utils
|
||||
RUN python3 /workspace/vllm/tools/install_nixl_from_source_ubuntu.py
|
||||
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.12/dist-packages/.nixl.mesonpy.libs/plugins/"
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip uninstall oneccl oneccl-devel -y
|
||||
|
||||
ENTRYPOINT ["vllm", "serve"]
|
||||
|
||||
@ -56,7 +56,7 @@ vLLM is flexible and easy to use with:

- Tensor, pipeline, data and expert parallelism support for distributed inference
- Streaming outputs
- OpenAI-compatible API server
- Support for NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, and TPU. Additionally, support for diverse hardware plugins such as Intel Gaudi, IBM Spyre and Huawei Ascend.
- Support for NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, Arm CPUs, and TPU. Additionally, support for diverse hardware plugins such as Intel Gaudi, IBM Spyre and Huawei Ascend.
- Prefix caching support
- Multi-LoRA support
BIN docs/assets/design/debug_vllm_compile/design_diagram.png (new binary file, 314 KiB)
BIN docs/assets/design/debug_vllm_compile/dynamic_shapes.png (new binary file, 359 KiB)
BIN docs/assets/design/debug_vllm_compile/tlparse_inductor.png (new binary file, 257 KiB)
@ -2,6 +2,7 @@

We host regular meetups in San Francisco Bay Area every 2 months. We will share the project updates from the vLLM team and have guest speakers from the industry to share their experience and insights. Please find the materials of our previous meetups below:

- [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/xSrYXjNgr1HbCP4ExYNG1w), November 1st 2025. [[Slides]](https://drive.google.com/drive/folders/1nQJ8ZkLSjKxvu36sSHaceVXtttbLvvu-?usp=drive_link)
- [vLLM Shanghai Meetup](https://mp.weixin.qq.com/s/__xb4OyOsImz-9eAVrdlcg), October 25th 2025. [[Slides]](https://drive.google.com/drive/folders/1KqwjsFJLfEsC8wlDugnrR61zsWHt94Q6)
- [vLLM Toronto Meetup](https://luma.com/e80e0ymm), September 25th 2025. [[Slides]](https://docs.google.com/presentation/d/1IYJYmJcu9fLpID5N5RbW_vO0XLo0CGOR14IXOjB61V8/edit?usp=sharing)
- [vLLM Shenzhen Meetup](https://mp.weixin.qq.com/s/k8ZBO1u2_2odgiKWH_GVTQ), August 30th 2025. [[Slides]](https://drive.google.com/drive/folders/1Ua2SVKVSu-wp5vou_6ElraDt2bnKhiEA)
@ -4,7 +4,7 @@ This doc serves as a collection of handy tips for optimizing your vLLM on TPU wo

## Get started

Looking for setup and installation instructions? Find them [here](../getting_started/installation/google_tpu.md).
Looking for setup and installation instructions? Find them [here](https://docs.vllm.ai/projects/tpu/en/latest/getting_started/installation/).

### TPU workload sizing
@ -39,7 +39,7 @@ Refer to [examples/offline_inference/simple_profiling.py](../../examples/offline

```bash
VLLM_TORCH_PROFILER_DIR=./vllm_profile \
vllm serve meta-llama/Meta-Llama-3-70B
vllm serve meta-llama/Llama-3.1-8B-Instruct
```

vllm bench command:

@ -47,7 +47,7 @@ vllm bench command:

```bash
vllm bench serve \
--backend vllm \
--model meta-llama/Meta-Llama-3-70B \
--model meta-llama/Llama-3.1-8B-Instruct \
--dataset-name sharegpt \
--dataset-path sharegpt.json \
--profile \
@ -70,18 +70,21 @@ apt update
apt install nsight-systems-cli
```

### Example commands and usage
!!! tip
When profiling with `nsys`, it is advisable to set the environment variable `VLLM_WORKER_MULTIPROC_METHOD=spawn`. The default is to use the `fork` method instead of `spawn`. More information on the topic can be found in the [Nsight Systems release notes](https://docs.nvidia.com/nsight-systems/ReleaseNotes/index.html#general-issues).

When profiling with `nsys`, it is advisable to set the environment variable `VLLM_WORKER_MULTIPROC_METHOD=spawn`. The default is to use the `fork` method instead of `spawn`. More information on the topic can be found in the [Nsight Systems release notes](https://docs.nvidia.com/nsight-systems/ReleaseNotes/index.html#general-issues).
The Nsight Systems profiler can be launched with `nsys profile ...`, with a few recommended flags for vLLM: `--trace-fork-before-exec=true --cuda-graph-trace=node`.

### Example commands and usage

#### Offline Inference

For basic usage, you can just append `nsys profile -o report.nsys-rep --trace-fork-before-exec=true --cuda-graph-trace=node` before any existing script you would run for offline inference.
For basic usage, you can just append the profiling command before any existing script you would run for offline inference.

The following is an example using the `vllm bench latency` script:

```bash
nsys profile -o report.nsys-rep \
nsys profile \
--trace-fork-before-exec=true \
--cuda-graph-trace=node \
vllm bench latency \
@ -95,40 +98,29 @@ vllm bench latency \

#### OpenAI Server

To profile the server, you will want to prepend your `vllm serve` command with `nsys profile` just like for offline inference, however you must specify `--delay XX --duration YY` parameters according to the needs of your benchmark. After the duration time has been used up, the server will be killed.
To profile the server, you will want to prepend your `vllm serve` command with `nsys profile` just like for offline inference, but you will need to specify a few other arguments to enable dynamic capture similarly to the Torch Profiler:

```bash
# server
nsys profile -o report.nsys-rep \
VLLM_TORCH_CUDA_PROFILE=1 \
nsys profile \
--trace-fork-before-exec=true \
--cuda-graph-trace=node \
--delay 30 \
--duration 60 \
--capture-range=cudaProfilerApi \
--capture-range-end repeat \
vllm serve meta-llama/Llama-3.1-8B-Instruct

# client
vllm bench serve \
--backend vllm \
--model meta-llama/Llama-3.1-8B-Instruct \
--num-prompts 1 \
--dataset-name random \
--random-input 1024 \
--random-output 512
--dataset-name sharegpt \
--dataset-path sharegpt.json \
--profile \
--num-prompts 2
```

In practice, you should set the `--duration` argument to a large value. Whenever you want the server to stop profiling, run:

```bash
nsys sessions list
```

to get the session id in the form of `profile-XXXXX`, then run:

```bash
nsys stop --session=profile-XXXXX
```

to manually kill the profiler and generate your `nsys-rep` report.
With `--profile`, vLLM will capture a profile for each run of `vllm bench serve`. Once the server is killed, the profiles will all be saved.

#### Analysis
@ -13,7 +13,7 @@ Before you begin, ensure that you have the following:
|
||||
- A running Kubernetes cluster
|
||||
- NVIDIA Kubernetes Device Plugin (`k8s-device-plugin`): This can be found at [https://github.com/NVIDIA/k8s-device-plugin](https://github.com/NVIDIA/k8s-device-plugin)
|
||||
- Available GPU resources in your cluster
|
||||
- An S3 with the model which will be deployed
|
||||
- (Optional) An S3 bucket or other storage with the model weights, if using automatic model download
|
||||
|
||||
## Installing the chart
|
||||
|
||||
@ -61,10 +61,16 @@ The following table describes configurable parameters of the chart in `values.ya
|
||||
| deploymentStrategy | object | {} | Deployment strategy configuration |
|
||||
| externalConfigs | list | [] | External configuration |
|
||||
| extraContainers | list | [] | Additional containers configuration |
|
||||
| extraInit | object | {"pvcStorage":"1Gi","s3modelpath":"relative_s3_model_path/opt-125m", "awsEc2MetadataDisabled": true} | Additional configuration for the init container |
|
||||
| extraInit.pvcStorage | string | "1Gi" | Storage size of the s3 |
|
||||
| extraInit.s3modelpath | string | "relative_s3_model_path/opt-125m" | Path of the model on the s3 which hosts model weights and config files |
|
||||
| extraInit.awsEc2MetadataDisabled | boolean | true | Disables the use of the Amazon EC2 instance metadata service |
|
||||
| extraInit | object | {"modelDownload":{"enabled":true},"initContainers":[],"pvcStorage":"1Gi"} | Additional configuration for init containers |
|
||||
| extraInit.modelDownload | object | {"enabled":true} | Model download functionality configuration |
|
||||
| extraInit.modelDownload.enabled | bool | true | Enable automatic model download job and wait container |
|
||||
| extraInit.modelDownload.image | object | {"repository":"amazon/aws-cli","tag":"2.6.4","pullPolicy":"IfNotPresent"} | Image for model download operations |
|
||||
| extraInit.modelDownload.waitContainer | object | {} | Wait container configuration (command, args, env) |
|
||||
| extraInit.modelDownload.downloadJob | object | {} | Download job configuration (command, args, env) |
|
||||
| extraInit.initContainers | list | [] | Custom init containers (appended after model download if enabled) |
|
||||
| extraInit.pvcStorage | string | "1Gi" | Storage size for the PVC |
|
||||
| extraInit.s3modelpath | string | "relative_s3_model_path/opt-125m" | (Optional) Path of the model on S3 |
|
||||
| extraInit.awsEc2MetadataDisabled | bool | true | (Optional) Disable AWS EC2 metadata service |
|
||||
| extraPorts | list | [] | Additional ports configuration |
|
||||
| gpuModels | list | ["TYPE_GPU_USED"] | Type of gpu used |
|
||||
| image | object | {"command":["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"],"repository":"vllm/vllm-openai","tag":"latest"} | Image configuration |
|
||||
@ -98,3 +104,36 @@ The following table describes configurable parameters of the chart in `values.ya
|
||||
| serviceName | string | "" | Service name |
|
||||
| servicePort | int | 80 | Service port |
|
||||
| labels.environment | string | test | Environment name |
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
### Using S3 Model Download (Default)
|
||||
|
||||
```yaml
|
||||
extraInit:
|
||||
modelDownload:
|
||||
enabled: true
|
||||
pvcStorage: "10Gi"
|
||||
s3modelpath: "models/llama-7b"
|
||||
```
|
||||
|
||||
### Using Custom Init Containers Only
|
||||
|
||||
For use cases like llm-d where you need custom sidecars without model download:
|
||||
|
||||
```yaml
|
||||
extraInit:
|
||||
modelDownload:
|
||||
enabled: false
|
||||
initContainers:
|
||||
- name: llm-d-routing-proxy
|
||||
image: ghcr.io/llm-d/llm-d-routing-sidecar:v0.2.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: proxy
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
restartPolicy: Always
|
||||
pvcStorage: "10Gi"
|
||||
```
|
||||
|
||||
239 docs/design/debug_vllm_compile.md (new file)
@ -0,0 +1,239 @@
# How to debug the vLLM-torch.compile integration

TL;DR:

- use tlparse to acquire torch.compile logs. Include these logs in bug reports and/or support asks.
- The vLLM-torch.compile integration is multiple pieces. vLLM exposes flags to turn off each piece:

| Online Flag | Offline Flag | Result |
|----------|----------|-------------|
| --enforce-eager | enforce_eager=True | Turn off torch.compile and CUDAGraphs |
| -O.mode=0 | compilation_config=CompilationConfig(mode=CompilationMode.NONE) | Turn off torch.compile only |
| -O.cudagraph_mode=NONE | compilation_config=CompilationConfig(cudagraph_mode=CUDAGraphMode.NONE) | Turn off CUDAGraphs only |
| -O.backend=eager | compilation_config=CompilationConfig(backend='eager') | Turn off TorchInductor |
## vLLM-torch.compile overview

To improve performance, vLLM leverages torch.compile and CUDAGraphs to speed things up.
torch.compile generates optimized kernels for PyTorch code while CUDAGraphs eliminates overhead.
Most notably, vLLM-compile is NOT torch.compile, it is a custom compiler built using internal PyTorch Compile APIs.

*(figure: vLLM-torch.compile integration design diagram, docs/assets/design/debug_vllm_compile/design_diagram.png)*

- Given a model, we do a full graph capture via TorchDynamo that is dynamic on the batch size (number of tokens)
- vLLM then optionally splits and/or specializes this graph and then uses TorchInductor to compile each graph into a compiled artifact.
  This step may use vLLM custom Inductor passes to further optimize the graph.
- The compiled artifact is saved to vLLM's compile cache so that it can be loaded in the future.
- vLLM applies CUDAGraphs to reduce CPU overheads.

Things can go wrong in each of the four steps. When something does go wrong, please try to isolate the subsystem
that went wrong -- this will allow you to turn off the minimal number of things to meet reliability
goals while minimizing the impact on performance, and it also helps us (vLLM) when you open a bug report.
For more details on the design, please see the following resources:

- [Introduction to vLLM-torch.compile blogpost](https://blog.vllm.ai/2025/08/20/torch-compile.html)
- [vLLM-torch.compile integration design](https://docs.vllm.ai/en/latest/design/torch_compile.html)
- [vLLM Office Hours #26](https://www.youtube.com/live/xLyxc7hxCJc?si=Xulo9pe53C6ywf0V&t=561)
- [Talk at PyTorch Conference 2025](https://youtu.be/1wV1ESbGrVQ?si=s1GqymUfwiwOrDTg&t=725)
## Use tlparse

Use [tlparse](https://github.com/meta-pytorch/tlparse) to acquire torch.compile logs. These logs show all stages of the compilation process,
including the fused kernels that torch.compile produces.
If you can, we recommend sending these or pieces of these along with any bug reports --
they are very helpful.

Install tlparse:

```sh
pip install tlparse
```

Usage (offline inference)

```sh
TORCH_TRACE=~/trace_dir python my_script.py
tlparse ~/trace_dir/<the_first_log_file>
```

Usage (serving)

```sh
TORCH_TRACE=~/trace_dir vllm serve
# ctrl-c out of the server
tlparse ~/trace_dir/<the_first_log_file>
```

The `tlparse` command outputs some HTML files (perhaps into e.g. `./tl_out/index.html`).
Open it to see the logs. It'll look something like the following:

*(screenshot: example tlparse report index)*
## Turn off vLLM-torch.compile integration

Pass `--enforce-eager` to turn off the vLLM-torch.compile integration and run entirely
in eager mode. This includes turning off CUDAGraphs.

```sh
# Online
vllm serve --enforce-eager
```

```py
# Offline
LLM(model, enforce_eager=True)
```

To turn off just torch.compile, pass `mode = NONE` to the compilation config.
(`-O` is short for `--compilation_config`):

```sh
# Online
vllm serve -O.mode=0
```

```py
# Offline
from vllm.config.compilation import CompilationConfig, CompilationMode
LLM(model, compilation_config=CompilationConfig(mode=CompilationMode.NONE))
```

To turn off just CUDAGraphs, pass `cudagraph_mode = NONE`:

```sh
# Online
vllm serve -O.cudagraph_mode=NONE
```

```py
# Offline
from vllm.config.compilation import CompilationConfig, CUDAGraphMode
LLM(model, compilation_config=CompilationConfig(cudagraph_mode=CUDAGraphMode.NONE))
```
## Debugging TorchDynamo

vLLM requires model code be capturable into a full graph via TorchDynamo (torch.compile's frontend).
TorchDynamo does not support all of Python. It will error (in fullgraph mode) if it cannot support
a feature (this is sometimes known as a graph break).

If you encounter a graph break, please [open an issue to pytorch/pytorch](https://github.com/pytorch/pytorch) so the PyTorch devs can prioritize.
Then, try your best to rewrite the code to avoid the graph break.
For more information, see this [Dynamo guide](https://docs.pytorch.org/docs/stable/compile/programming_model.dynamo_core_concepts.html).
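To see what a graph break looks like in isolation, here is a minimal standalone sketch (plain PyTorch, not vLLM code). `print` on a runtime value is one common construct Dynamo cannot capture, and `fullgraph=True` turns the resulting graph break into a hard error instead of a silent split:

```py
import torch

def f(x):
    # print() on a runtime value cannot be captured into the graph,
    # so Dynamo reports a graph break here.
    print("mean:", x.mean().item())
    return x * 2

# fullgraph=True mirrors vLLM's full-graph capture: the graph break
# becomes an error rather than a silent fallback to eager.
compiled = torch.compile(f, fullgraph=True)

try:
    compiled(torch.randn(8))
except Exception as err:
    print(f"{type(err).__name__}: {err}")
```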
## Debugging Dynamic Shape full graph capture

vLLM requires that the model's forward pass be capturable into a full graph that is dynamic
on the batch size (i.e. the number of tokens). It (by default) compiles this one graph into
one artifact and uses this artifact for all batch sizes.

If your code cannot be captured with Dynamic Shapes, you may see silent incorrectness,
loud errors, or CUDA illegal memory accesses. For example, the following is not
capturable into a single graph:

```py
if data.shape[0] % 128 == 0:
    foo(...)
else:
    bar(...)
```

This problem is easy to diagnose. Use tlparse and click on `compilation_metrics`:
it will tell you symbolic constraints on the batch size. If there is any constraint
that restricts the batch sizes, then we've got a problem.

*(screenshot: tlparse compilation_metrics view showing a batch-size constraint, docs/assets/design/debug_vllm_compile/dynamic_shapes.png)*

To avoid this, please either:

1. avoid branching on the number of tokens
2. wrap the branching logic into a custom operator. TorchDynamo does not
   trace into custom operators (see the sketch below).
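For option 2, the following is a minimal sketch using the standard `torch.library.custom_op` API (not a vLLM helper); the operator name `mylib::dispatch_by_padding` and the two branch functions are illustrative placeholders standing in for `foo(...)` and `bar(...)`:

```py
import torch
from torch.library import custom_op

def _padded_path(x: torch.Tensor) -> torch.Tensor:
    # Stands in for foo(...) above.
    return x * 2

def _unpadded_path(x: torch.Tensor) -> torch.Tensor:
    # Stands in for bar(...) above.
    return x + 1

@custom_op("mylib::dispatch_by_padding", mutates_args=())
def dispatch_by_padding(data: torch.Tensor) -> torch.Tensor:
    # TorchDynamo treats the custom op as opaque, so this branch on the
    # number of tokens no longer constrains the captured graph.
    if data.shape[0] % 128 == 0:
        return _padded_path(data)
    return _unpadded_path(data)

@dispatch_by_padding.register_fake
def _(data: torch.Tensor) -> torch.Tensor:
    # Fake/meta implementation used at trace time: only shape/dtype matter.
    return torch.empty_like(data)
```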
## Debugging TorchInductor

TorchInductor takes a captured graph and then compiles it down to some Python code
that may call 1+ triton kernels. On rare (but unfortunate) occasions, it may
produce an incorrect triton kernel. This may manifest as silent incorrectness,
CUDA illegal memory accesses, or loud errors.

To debug if TorchInductor is at fault, you can disable it by passing `backend='eager'`
to the compilation config:

```sh
# online
vllm serve -O.backend=eager
```

```py
# offline
LLM(compilation_config=CompilationConfig(backend='eager'))
```

If Inductor is at fault, [file a bug to PyTorch](https://github.com/pytorch/pytorch).
If you're feeling adventurous, you can debug the triton kernels in the Inductor output code
(which you can locate via tlparse).

*(screenshot: Inductor output code as shown by tlparse, docs/assets/design/debug_vllm_compile/tlparse_inductor.png)*

You can also use `TORCH_LOGS=output_code <command>` to print the Inductor output code.
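For example, for an offline run (the script name is a placeholder, mirroring the tlparse usage above):

```sh
# Print the Inductor-generated output code for an offline run.
TORCH_LOGS=output_code python my_script.py
```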
### Editable TorchInductor code

You can edit the TorchInductor code that gets run by setting `VLLM_COMPILE_CACHE_SAVE_FORMAT=unpacked`
or passing `-O.compile_cache_save_format=unpacked`. The default is `binary`, which means it is not editable.

This is a useful technique: you can put breakpoints (e.g. `torch.distributed.breakpoint()`)
and print statements in the output code.
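For example (check the logs for where the unpacked code is written; by default it lives under vLLM's compile cache directory):

```sh
# Save the compiled artifacts in editable (unpacked) form, then edit the
# generated Inductor output code between runs.
VLLM_COMPILE_CACHE_SAVE_FORMAT=unpacked vllm serve
```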
## Debugging vLLM-compile cache

vLLM built its own cache for torch.compile artifacts. The idea is that the artifacts
can be compiled once and then reused after they have been compiled. This
is a layer on top of [torch.compile's compiler cache](https://docs.pytorch.org/tutorials/recipes/torch_compile_caching_tutorial.html).

While torch.compile's compiler cache is rock-stable, vLLM's compiler cache is unfortunately
not always correct. You can disable it via setting `VLLM_DISABLE_COMPILE_CACHE=1`.
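For example:

```sh
# Run with vLLM's own compile cache disabled (the flag named above).
VLLM_DISABLE_COMPILE_CACHE=1 vllm serve
```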
You can also manually remove this cache.

- Remove vLLM's compile cache with `rm -rf ~/.cache/vllm` (look at logs to see if the location changed)
- Remove torch.compile's built-in caches with `rm -rf /tmp/torchinductor_$(whoami)`

vLLM's cache is a mapping from cache key to a compiled artifact. vLLM computes
the cache key via combining multiple factors (e.g. config flags and model name).
If vLLM's compile cache is wrong, this usually means that a factor is missing.
Please see [this example](https://github.com/vllm-project/vllm/blob/18b39828d90413d05d770dfd2e2f48304f4ca0eb/vllm/config/model.py#L310)
of how vLLM computes part of the cache key.
## Debugging CUDAGraphs

CUDAGraphs is a feature that allows one to:

- Capture a callable that launches 1+ CUDA kernels into a CUDAGraph
- Replay the CUDAGraph

The captured CUDAGraph contains all of the memory used during the capture process.
The replay of the CUDAGraph reads and writes to exactly the same regions of memory.

This leads to some restrictions:

1. In order to use CUDAGraphs on new data, you'll need to copy the data into a buffer
   that the CUDAGraph is reading from (see the sketch below)
2. CUDAGraphs only capture CUDA kernels, they don't capture work done on CPU.
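To make restriction 1 concrete, here is a minimal sketch of PyTorch's raw CUDA Graphs API (plain PyTorch on a CUDA device, not vLLM's wrapper code):

```py
import torch

# Buffer allocated before capture; the graph will always read from it.
static_in = torch.zeros(8, device="cuda")

# Warm up on a side stream before capture (recommended by PyTorch).
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    static_out = static_in * 2
torch.cuda.current_stream().wait_stream(s)

# Record the kernels into a CUDA graph.
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
    static_out = static_in * 2

# Restriction 1: new inputs must be copied into the capture-time buffer.
static_in.copy_(torch.arange(8, dtype=torch.float32, device="cuda"))
g.replay()  # reruns the recorded kernels; the result lands in static_out
print(static_out)
```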
vLLM uses the raw CUDAGraphs API, which is unsafe when used incorrectly.

To turn off just CUDAGraphs, pass `cudagraph_mode = NONE`:

```sh
# Online
vllm serve -O.cudagraph_mode=NONE
```

```py
# Offline
from vllm.config.compilation import CompilationConfig, CUDAGraphMode
LLM(model, compilation_config=CompilationConfig(cudagraph_mode=CUDAGraphMode.NONE))
```
@ -254,7 +254,15 @@ The previous sections alluded to the interfaces which vLLM logits processors mus
|
||||
changes to the batch makeup.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@classmethod
|
||||
def validate_params(cls, sampling_params: SamplingParams):
|
||||
"""Validate sampling params for this logits processor.
|
||||
|
||||
Raise ValueError for invalid ones.
|
||||
"""
|
||||
return None
|
||||
|
||||
```
|
||||
|
||||
A vLLM logits processor must subclass `LogitsProcessor` and define (at minimum) the following methods:
|
||||
@ -279,6 +287,10 @@ A vLLM logits processor must subclass `LogitsProcessor` and define (at minimum)
|
||||
* Use the `BatchUpdate` members to update logits processor internal state
|
||||
* **Note:** the batch update data structure may be `None`, signaling no change to the batch constituents. In this case, the LogitsProcessor might still want to update its state based on the updated `output_token_ids` lists that it could have retained when the requests were added (see the sketch after this list).
|
||||
|
||||
* `validate_params(cls, sampling_params: SamplingParams)`:
|
||||
* Raise `ValueError` if `SamplingParams` has invalid arguments (especially custom arguments) used by the logits processor.
|
||||
* When a request is sent to the entrypoint, `validate_params()` validates its `SamplingParams` and rejects the request if any arguments are invalid.
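As a schematic illustration of the `None` case noted above (not vLLM's actual implementation), an `update_state` body might be shaped like this:

```python
def update_state(self, batch_update: BatchUpdate | None) -> None:
    if batch_update is None:
        # No requests were added, removed or moved this step. Per-request
        # state may still be refreshed here, e.g. from the output_token_ids
        # lists retained when the requests were added.
        return
    # Otherwise, apply the removals, additions and moves described by
    # batch_update, in the documented order, to keep internal state
    # aligned with the persistent batch.
    ...
```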
|
||||
|
||||
### `BatchUpdate` data structure
|
||||
|
||||
The `BatchUpdate` abstraction models the persistent batch as a list of requests, supporting the following operations to change batch state (note that the order in which the operations are mentioned below reflects the order in which they should be processed in `update_state()`):
|
||||
|
||||
@ -97,7 +97,7 @@ To be used with a particular `FusedMoEPrepareAndFinalize` sub-class, MoE kernels
|
||||
| trtllm | standard | mxfp4,</br>nvfp4 | G(16),G(32) | <sup>5</sup> | N | Y | [`TrtLlmGenExperts`][vllm.model_executor.layers.fused_moe.trtllm_moe.TrtLlmGenExperts] |
|
||||
| pallas | standard | N/A | N/A | silu | N | N | [`fused_moe`][vllm.model_executor.layers.fused_moe.moe_pallas.fused_moe] |
|
||||
| iterative | standard | N/A | N/A | silu | N | N | [`fused_moe`][vllm.model_executor.layers.fused_moe.moe_torch_iterative.fused_moe] |
|
||||
| rocm aiter moe | standard | fp8 | G(128),A,T | silu, gelu | Y | N | [`rocm_aiter_fused_experts`][vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe.rocm_aiter_fused_moe_impl] |
|
||||
| rocm aiter moe | standard | fp8 | G(128),A,T | silu, gelu | Y | N | [`rocm_aiter_fused_experts`][vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe.rocm_aiter_fused_experts] |
|
||||
| cpu_fused_moe | standard | N/A | N/A | silu | N | N | [`CPUFusedMOE`][vllm.model_executor.layers.fused_moe.cpu_fused_moe.CPUFusedMOE] |
|
||||
| naive batched<sup>4</sup> | batched | int8,</br>fp8 | G,A,T | silu, gelu | <sup>6</sup> | Y | [`NaiveBatchedExperts`][vllm.model_executor.layers.fused_moe.fused_batched_moe.NaiveBatchedExperts] |
|
||||
|
||||
|
||||
@ -27,6 +27,8 @@ With all these factors taken into consideration, usually we can guarantee that t
|
||||
|
||||
A unique aspect of vLLM's `torch.compile` integration is that we guarantee all compilation finishes before we serve any requests. No request will trigger new compilation. Otherwise, the engine would be blocked on that request, and response times would have unexpected spikes.
|
||||
|
||||
By default, the cache saves compiled artifacts as binary files. If you would like to interact with the generated code for debugging purposes, set the field `compile_cache_save_format=unpacked` in the compilation config, or omit this and set the env variable `VLLM_COMPILE_CACHE_SAVE_FORMAT=unpacked`.
|
||||
|
||||
## Python Code Compilation
|
||||
|
||||
In the very verbose logs, we can see:
|
||||
|
||||
133 docs/features/batch_invariance.md (new file)
@ -0,0 +1,133 @@
|
||||
# Batch Invariance
|
||||
|
||||
!!! note
|
||||
Batch invariance is currently in beta. Some features are still under active development.
|
||||
Track progress and planned improvements at <https://github.com/vllm-project/vllm/issues/27433>
|
||||
|
||||
This document shows how to enable batch invariance in vLLM. Batch invariance ensures that the output of a model is deterministic and independent of the batch size or the order of requests in a batch.
|
||||
|
||||
## Motivation
|
||||
|
||||
Batch invariance is crucial for several use cases:
|
||||
|
||||
- **Framework debugging**: Deterministic outputs make it easier to debug issues in the inference framework, as the same input will always produce the same output regardless of batching.
|
||||
- **Model debugging**: Helps identify issues in model implementations by ensuring consistent behavior across different batch configurations.
|
||||
- **Reinforcement Learning (RL)**: RL training often requires deterministic rollouts for reproducibility and stable training.
|
||||
- **Large-scale inference systems**: Systems that use vLLM as a component benefit from deterministic behavior for testing, validation, and consistency guarantees.
|
||||
|
||||
## Hardware Requirements
|
||||
|
||||
Batch invariance currently requires NVIDIA GPUs with compute capability 9.0 or higher:
|
||||
|
||||
- **H-series**: H100, H200
|
||||
- **B-series**: B100, B200
|
||||
|
||||
## Enabling Batch Invariance
|
||||
|
||||
Batch invariance can be enabled by setting the `VLLM_BATCH_INVARIANT` environment variable to `1`:
|
||||
|
||||
```bash
|
||||
export VLLM_BATCH_INVARIANT=1
|
||||
```
|
||||
|
||||
### Online Inference (Server Mode)
|
||||
|
||||
To start a vLLM server with batch invariance enabled:
|
||||
|
||||
```bash
|
||||
VLLM_BATCH_INVARIANT=1 vllm serve meta-llama/Llama-3.1-8B-Instruct
|
||||
```
|
||||
|
||||
Then use the OpenAI-compatible client:
|
||||
|
||||
```python
|
||||
from openai import OpenAI
|
||||
|
||||
client = OpenAI(
|
||||
api_key="EMPTY",
|
||||
base_url="http://localhost:8000/v1",
|
||||
)
|
||||
|
||||
# These requests will produce deterministic outputs
|
||||
# regardless of batch size or order
|
||||
response = client.completions.create(
|
||||
model="meta-llama/Llama-3.1-8B-Instruct",
|
||||
prompt="The future of AI is",
|
||||
max_tokens=100,
|
||||
temperature=0.7,
|
||||
seed=42,
|
||||
)
|
||||
|
||||
print(response.choices[0].text)
|
||||
```
|
||||
|
||||
### Offline Inference
|
||||
|
||||
For offline batch inference with batch invariance:
|
||||
|
||||
```python
|
||||
import os
|
||||
os.environ["VLLM_BATCH_INVARIANT"] = "1"
|
||||
|
||||
from vllm import LLM, SamplingParams
|
||||
|
||||
prompts = [
|
||||
"The future of AI is",
|
||||
"Machine learning enables",
|
||||
"Deep learning models can",
|
||||
]
|
||||
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0.7,
|
||||
top_p=0.95,
|
||||
max_tokens=100,
|
||||
seed=42,
|
||||
)
|
||||
|
||||
llm = LLM(
|
||||
model="meta-llama/Llama-3.1-8B-Instruct",
|
||||
tensor_parallel_size=1,
|
||||
)
|
||||
|
||||
# Outputs will be deterministic regardless of batch size
|
||||
outputs = llm.generate(prompts, sampling_params)
|
||||
|
||||
for output in outputs:
|
||||
prompt = output.prompt
|
||||
generated_text = output.outputs[0].text
|
||||
print(f"Prompt: {prompt!r}")
|
||||
print(f"Generated: {generated_text!r}\n")
|
||||
```
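As an illustrative sanity check (not an official test), you can compare the first prompt generated alone against the same prompt generated as part of the full batch; with batch invariance enabled and a fixed seed, the two texts should match exactly:

```python
# Continuing from the snippet above: generate the first prompt alone and
# inside the full batch; with batch invariance the texts should be identical.
solo_text = llm.generate([prompts[0]], sampling_params)[0].outputs[0].text
batched_text = llm.generate(prompts, sampling_params)[0].outputs[0].text
assert solo_text == batched_text, "outputs differ across batch sizes"
```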
|
||||
|
||||
## Tested Models
|
||||
|
||||
Batch invariance has been tested and verified on the following models:
|
||||
|
||||
- **DeepSeek series**: `deepseek-ai/DeepSeek-V3`, `deepseek-ai/DeepSeek-V3-0324`, `deepseek-ai/DeepSeek-R1`, `deepseek-ai/DeepSeek-V3.1`
|
||||
- **Qwen3 (Dense)**: `Qwen/Qwen3-1.7B`, `Qwen/Qwen3-8B`
|
||||
- **Qwen3 (MoE)**: `Qwen/Qwen3-30B-A3B`, `Qwen/Qwen3-Next-80B-A3B-Instruct`
|
||||
- **Llama 3**: `meta-llama/Llama-3.1-8B-Instruct`, `meta-llama/Llama-3.2-1B-Instruct`
|
||||
|
||||
Other models may also work, but these have been explicitly validated. If you encounter issues with a specific model, please report them on the [GitHub issue tracker](https://github.com/vllm-project/vllm/issues/new/choose).
|
||||
|
||||
## Implementation Details
|
||||
|
||||
When batch invariance is enabled, vLLM:
|
||||
|
||||
1. Uses deterministic kernel implementations for attention and other operations
|
||||
2. Ensures consistent numerical behavior across different batch sizes
|
||||
3. Disables certain optimizations that may introduce non-determinism (such as custom all-reduce operations in tensor parallel mode)
|
||||
|
||||
!!! note
|
||||
Enabling batch invariance may impact performance compared to the default non-deterministic mode. This trade-off is intentional to guarantee reproducibility.
|
||||
|
||||
## Future Improvements
|
||||
|
||||
The batch invariance feature is under active development. Planned improvements include:
|
||||
|
||||
- Support for additional GPU architectures
|
||||
- Expanded model coverage
|
||||
- Performance optimizations
|
||||
- Additional testing and validation
|
||||
|
||||
For the latest status and to contribute ideas, see the [tracking issue](https://github.com/vllm-project/vllm/issues/27433).
|
||||
@ -4,6 +4,9 @@ You can use vLLM *custom arguments* to pass in arguments which are not part of t
|
||||
|
||||
Custom arguments can be useful if, for example, you want to use a [custom logits processor](./custom_logitsprocs.md) without modifying the vLLM source code.
|
||||
|
||||
!!! note
|
||||
Make sure your custom logits processor implements `validate_params` for its custom arguments. Otherwise, invalid custom arguments can cause unexpected behaviour.
|
||||
|
||||
## Offline Custom Arguments
|
||||
|
||||
Custom arguments passed to `SamplingParams.extra_args` as a `dict` will be visible to any code which has access to `SamplingParams`:
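For example, a minimal sketch (the model name and the `my_custom_arg` key are placeholders):

```python
from vllm import LLM, SamplingParams

# "my_custom_arg" is an arbitrary key; it is only meaningful to code that
# reads it back out of SamplingParams.extra_args (e.g. a custom logits
# processor).
llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct")
params = SamplingParams(extra_args={"my_custom_arg": 42})
outputs = llm.generate(["Hello, my name is"], params)
```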
|
||||
|
||||
@ -18,6 +18,11 @@ In vLLM, logits processors operate at batch granularity. During a given engine s
|
||||
|
||||
Custom logits processors must subclass `vllm.v1.sample.logits_processor.LogitsProcessor` and define (at minimum) the following methods:
|
||||
|
||||
* `validate_params(cls, sampling_params: SamplingParams)`:
|
||||
* Raise `ValueError` if `SamplingParams` has invalid arguments (especially custom arguments) used by the logits processor.
|
||||
* When a request is sent to the entrypoint, `validate_params()` validates its `SamplingParams` and rejects the request if any arguments are invalid.
|
||||
* **Note:** it is important to implement `validate_params()` to keep invalid parameters out of your custom logits processor. Otherwise, requests with invalid parameters can cause unexpected behaviour in the custom logits processor.
|
||||
|
||||
* `__init__(self, vllm_config: VllmConfig, device: torch.device, is_pin_memory: bool)`
|
||||
* `vllm_config`: engine configuration data structure
|
||||
* `device`: hardware accelerator device info
|
||||
@ -103,6 +108,14 @@ The contrived example below implements a custom logits processor which consumes
|
||||
class DummyLogitsProcessor(LogitsProcessor):
|
||||
"""Fake logit processor to support unit testing and examples"""
|
||||
|
||||
@classmethod
|
||||
def validate_params(cls, params: SamplingParams):
|
||||
target_token: int | None = params.extra_args and params.extra_args.get(
|
||||
"target_token"
|
||||
)
|
||||
if target_token is not None and not isinstance(target_token, int):
|
||||
raise ValueError(f"target_token value {target_token} is not int")
|
||||
|
||||
def __init__(self, vllm_config: "VllmConfig", device: torch.device,
|
||||
is_pin_memory: bool):
|
||||
self.req_info: dict[int, int] = {}
|
||||
@ -118,6 +131,7 @@ The contrived example below implements a custom logits processor which consumes
|
||||
# Process added requests.
|
||||
for index, params, _, _ in batch_update.added:
|
||||
assert params is not None
|
||||
self.validate_params(params)
|
||||
if params.extra_args and (target_token :=
|
||||
params.extra_args.get("target_token")):
|
||||
self.req_info[index] = target_token
|
||||
@ -157,6 +171,7 @@ The contrived example below implements a custom logits processor which consumes
|
||||
logits[rows, cols] = values_to_keep
|
||||
|
||||
return logits
|
||||
|
||||
```
|
||||
|
||||
In the rest of this document, we will use `DummyLogitsProcessor` as an example of a custom logits processor.
|
||||
@ -180,7 +195,13 @@ RequestLogitsProcessor = Union[
|
||||
|
||||
While request-level logits processors are explicitly *not* supported in the vLLM engine, vLLM *does* provide a convenient process to wrap an existing `Callable` request-level logits processor and create a batch-level logits processor that is compatible with vLLM. The `Callable` must conform to the type annotation above; if your request-level logits processor has a different interface, then in order to wrap it, you may need to modify it or implement an additional wrapper layer to comply with the interface specification above.
|
||||
|
||||
You can wrap the request-level logits processor by subclassing `AdapterLogitsProcessor` as shown in the example below (in this example, `DummyPerReqLogitsProcessor` is a stand-in for your request-level logits processor which needs to be wrapped.) Override `AdapterLogitsProcessor.is_argmax_invariant(self)` to accurately reflect whether your request-level logits processor may impact which token has the highest-value logit. Override `AdapterLogitsProcessor.new_req_logits_processor(self,params)` to create a new request-level logits processor instance from a `SamplingParams` instance:
|
||||
You can wrap the request-level logits processor by subclassing `AdapterLogitsProcessor` as shown in the example below (in this example, `DummyPerReqLogitsProcessor` is a stand-in for your request-level logits processor which needs to be wrapped):
|
||||
|
||||
* Override `AdapterLogitsProcessor.validate_params(cls,params)` to validate request's sampling parameters.
|
||||
|
||||
* Override `AdapterLogitsProcessor.is_argmax_invariant(self)` to accurately reflect whether your request-level logits processor may impact which token has the highest-value logit.
|
||||
|
||||
* Override `AdapterLogitsProcessor.new_req_logits_processor(self,params)` to create a new request-level logits processor instance from a `SamplingParams` instance:
|
||||
|
||||
??? code "Example of Wrapping a Request-Level Logits Processor"
|
||||
|
||||
@ -220,6 +241,16 @@ You can wrap the request-level logits processor by subclassing `AdapterLogitsPro
|
||||
"""Example of wrapping a fake request-level logit processor to create a
|
||||
batch-level logits processor"""
|
||||
|
||||
@classmethod
|
||||
def validate_params(cls, params: SamplingParams):
|
||||
target_token: Any | None = params.extra_args and params.extra_args.get(
|
||||
"target_token"
|
||||
)
|
||||
if target_token is not None and not isinstance(target_token, int):
|
||||
raise ValueError(
|
||||
f"target_token value {target_token} is not int"
|
||||
)
|
||||
|
||||
def is_argmax_invariant(self) -> bool:
|
||||
return False
|
||||
|
||||
@ -240,18 +271,11 @@ You can wrap the request-level logits processor by subclassing `AdapterLogitsPro
|
||||
Returns:
|
||||
`Callable` request logits processor, or None
|
||||
"""
|
||||
target_token: Optional[Any] = params.extra_args and params.extra_args.get(
|
||||
target_token: Any | None = params.extra_args and params.extra_args.get(
|
||||
"target_token"
|
||||
)
|
||||
if target_token is None:
|
||||
return None
|
||||
if not isinstance(target_token, int):
|
||||
logger.warning(
|
||||
"target_token value %s is not int; not applying logits"
|
||||
" processor to request.",
|
||||
target_token,
|
||||
)
|
||||
return None
|
||||
return DummyPerReqLogitsProcessor(target_token)
|
||||
```
|
||||
|
||||
|
||||
@ -509,8 +509,8 @@ Then, you can use the OpenAI client as follows:
|
||||
print("Chat completion output:", chat_response.choices[0].message.content)
|
||||
|
||||
# Multi-image input inference
|
||||
image_url_duck = "https://upload.wikimedia.org/wikipedia/commons/d/da/2015_Kaczka_krzy%C5%BCowka_w_wodzie_%28samiec%29.jpg"
|
||||
image_url_lion = "https://upload.wikimedia.org/wikipedia/commons/7/77/002_The_lion_king_Snyggve_in_the_Serengeti_National_Park_Photo_by_Giles_Laurent.jpg"
|
||||
image_url_duck = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/duck.jpg"
|
||||
image_url_lion = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/lion.jpg"
|
||||
|
||||
chat_response = client.chat.completions.create(
|
||||
model="microsoft/Phi-3.5-vision-instruct",
|
||||
|
||||
@ -81,7 +81,7 @@ python tests/v1/kv_connector/nixl_integration/toy_proxy_server.py \
|
||||
- Default: 5600
|
||||
- **Required for both prefiller and decoder instances**
|
||||
- Each vLLM worker needs a unique port on its host; using the same port number across different hosts is fine
|
||||
- For TP/DP deployments, each worker's port on a node is computed as: base_port + dp_rank * tp_size + tp_rank (e.g., with `--tensor-parallel-size=4` and base_port=5600, tp_rank 0..3 use ports 5600, 5601, 5602, 5603 on that node).
|
||||
- For TP/DP deployments, each worker's port on a node is computed as: base_port + dp_rank (e.g., with `--data-parallel-size=2` and base_port=5600, dp_rank 0..1 use port 5600, 5601 on that node).
|
||||
- Used for the initial NIXL handshake between the prefiller and the decoder
|
||||
|
||||
- `VLLM_NIXL_SIDE_CHANNEL_HOST`: Host for side channel communication
|
||||
|
||||
@ -2,7 +2,10 @@
|
||||
|
||||
vLLM offers support for reasoning models like [DeepSeek R1](https://huggingface.co/deepseek-ai/DeepSeek-R1), which are designed to generate outputs containing both reasoning steps and final conclusions.
|
||||
|
||||
Reasoning models return an additional `reasoning_content` field in their outputs, which contains the reasoning steps that led to the final conclusion. This field is not present in the outputs of other models.
|
||||
Reasoning models return an additional `reasoning` field in their outputs, which contains the reasoning steps that led to the final conclusion. This field is not present in the outputs of other models.
|
||||
|
||||
!!! warning
|
||||
`reasoning` used to be called `reasoning_content`. For now, `reasoning_content` will continue to work. However, we encourage you to migrate to `reasoning` in case `reasoning_content` is removed in future.
|
||||
|
||||
## Supported Models
|
||||
|
||||
@ -61,18 +64,18 @@ Next, make a request to the model that should return the reasoning content in th
|
||||
# extra_body={"chat_template_kwargs": {"enable_thinking": False}}
|
||||
response = client.chat.completions.create(model=model, messages=messages)
|
||||
|
||||
reasoning_content = response.choices[0].message.reasoning_content
|
||||
reasoning = response.choices[0].message.reasoning
|
||||
content = response.choices[0].message.content
|
||||
|
||||
print("reasoning_content:", reasoning_content)
|
||||
print("reasoning:", reasoning)
|
||||
print("content:", content)
|
||||
```
|
||||
|
||||
The `reasoning_content` field contains the reasoning steps that led to the final conclusion, while the `content` field contains the final conclusion.
|
||||
The `reasoning` field contains the reasoning steps that led to the final conclusion, while the `content` field contains the final conclusion.
|
||||
|
||||
## Streaming chat completions
|
||||
|
||||
Streaming chat completions are also supported for reasoning models. The `reasoning_content` field is available in the `delta` field in [chat completion response chunks](https://platform.openai.com/docs/api-reference/chat/streaming).
|
||||
Streaming chat completions are also supported for reasoning models. The `reasoning` field is available in the `delta` field in [chat completion response chunks](https://platform.openai.com/docs/api-reference/chat/streaming).
|
||||
|
||||
??? console "Json"
|
||||
|
||||
@ -88,7 +91,7 @@ Streaming chat completions are also supported for reasoning models. The `reasoni
|
||||
"index": 0,
|
||||
"delta": {
|
||||
"role": "assistant",
|
||||
"reasoning_content": "is",
|
||||
"reasoning": "is",
|
||||
},
|
||||
"logprobs": null,
|
||||
"finish_reason": null
|
||||
@ -97,7 +100,7 @@ Streaming chat completions are also supported for reasoning models. The `reasoni
|
||||
}
|
||||
```
|
||||
|
||||
OpenAI Python client library does not officially support `reasoning_content` attribute for streaming output. But the client supports extra attributes in the response. You can use `hasattr` to check if the `reasoning_content` attribute is present in the response. For example:
|
||||
OpenAI Python client library does not officially support `reasoning` attribute for streaming output. But the client supports extra attributes in the response. You can use `hasattr` to check if the `reasoning` attribute is present in the response. For example:
|
||||
|
||||
??? code
|
||||
|
||||
@ -127,22 +130,22 @@ OpenAI Python client library does not officially support `reasoning_content` att
|
||||
)
|
||||
|
||||
print("client: Start streaming chat completions...")
|
||||
printed_reasoning_content = False
|
||||
printed_reasoning = False
|
||||
printed_content = False
|
||||
|
||||
for chunk in stream:
|
||||
# Safely extract reasoning_content and content from delta,
|
||||
# Safely extract reasoning and content from delta,
|
||||
# defaulting to None if attributes don't exist or are empty strings
|
||||
reasoning_content = (
|
||||
getattr(chunk.choices[0].delta, "reasoning_content", None) or None
|
||||
reasoning = (
|
||||
getattr(chunk.choices[0].delta, "reasoning", None) or None
|
||||
)
|
||||
content = getattr(chunk.choices[0].delta, "content", None) or None
|
||||
|
||||
if reasoning_content is not None:
|
||||
if not printed_reasoning_content:
|
||||
printed_reasoning_content = True
|
||||
print("reasoning_content:", end="", flush=True)
|
||||
print(reasoning_content, end="", flush=True)
|
||||
if reasoning is not None:
|
||||
if not printed_reasoning:
|
||||
printed_reasoning = True
|
||||
print("reasoning:", end="", flush=True)
|
||||
print(reasoning, end="", flush=True)
|
||||
elif content is not None:
|
||||
if not printed_content:
|
||||
printed_content = True
|
||||
@ -151,11 +154,11 @@ OpenAI Python client library does not officially support `reasoning_content` att
|
||||
print(content, end="", flush=True)
|
||||
```
|
||||
|
||||
Remember to check whether the `reasoning_content` exists in the response before accessing it. You could check out the [example](https://github.com/vllm-project/vllm/blob/main/examples/online_serving/openai_chat_completion_with_reasoning_streaming.py).
|
||||
Remember to check whether the `reasoning` exists in the response before accessing it. You could check out the [example](https://github.com/vllm-project/vllm/blob/main/examples/online_serving/openai_chat_completion_with_reasoning_streaming.py).
|
||||
|
||||
## Tool Calling
|
||||
|
||||
The reasoning content is also available when both tool calling and the reasoning parser are enabled. Additionally, tool calling only parses functions from the `content` field, not from the `reasoning_content`.
|
||||
The reasoning content is also available when both tool calling and the reasoning parser are enabled. Additionally, tool calling only parses functions from the `content` field, not from the `reasoning`.
|
||||
|
||||
??? code
|
||||
|
||||
@ -192,7 +195,7 @@ The reasoning content is also available when both tool calling and the reasoning
|
||||
print(response)
|
||||
tool_call = response.choices[0].message.tool_calls[0].function
|
||||
|
||||
print(f"reasoning_content: {response.choices[0].message.reasoning_content}")
|
||||
print(f"reasoning: {response.choices[0].message.reasoning}")
|
||||
print(f"Function called: {tool_call.name}")
|
||||
print(f"Arguments: {tool_call.arguments}")
|
||||
```
|
||||
@ -219,12 +222,11 @@ You can add a new `ReasoningParser` similar to [vllm/reasoning/deepseek_r1_reaso
|
||||
# define a reasoning parser and register it to vllm
|
||||
# the name list in register_module can be used
|
||||
# in --reasoning-parser.
|
||||
@ReasoningParserManager.register_module(["example"])
|
||||
class ExampleParser(ReasoningParser):
|
||||
def __init__(self, tokenizer: AnyTokenizer):
|
||||
super().__init__(tokenizer)
|
||||
|
||||
def extract_reasoning_content_streaming(
|
||||
def extract_reasoning_streaming(
|
||||
self,
|
||||
previous_text: str,
|
||||
current_text: str,
|
||||
@ -241,7 +243,7 @@ You can add a new `ReasoningParser` similar to [vllm/reasoning/deepseek_r1_reaso
|
||||
previously been parsed and extracted (see constructor)
|
||||
"""
|
||||
|
||||
def extract_reasoning_content(
|
||||
def extract_reasoning(
|
||||
self,
|
||||
model_output: str,
|
||||
request: ChatCompletionRequest | ResponsesRequest,
|
||||
@ -263,6 +265,12 @@ You can add a new `ReasoningParser` similar to [vllm/reasoning/deepseek_r1_reaso
|
||||
tuple[Optional[str], Optional[str]]
|
||||
A tuple containing the reasoning content and the content.
|
||||
"""
|
||||
# Register the reasoning parser
|
||||
ReasoningParserManager.register_lazy_module(
|
||||
name="example",
|
||||
module_path="vllm.reasoning.example_reasoning_parser",
|
||||
class_name="ExampleParser",
|
||||
)
|
||||
```
|
||||
|
||||
Additionally, to enable structured output, you'll need to create a new `Reasoner` similar to the one in [vllm/reasoning/deepseek_r1_reasoning_parser.py](../../vllm/reasoning/deepseek_r1_reasoning_parser.py).
|
||||
|
||||
@ -130,6 +130,46 @@ matching n-grams in the prompt. For more information read [this thread.](https:/
|
||||
print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
|
||||
```
|
||||
|
||||
## Speculating using Suffix Decoding
|
||||
|
||||
The following code configures vLLM to use speculative decoding where proposals are generated using Suffix Decoding ([technical report](https://arxiv.org/abs/2411.04975)).
|
||||
|
||||
Like n-gram, Suffix Decoding can generate draft tokens by pattern-matching using the last `n` generated tokens. Unlike n-gram, Suffix Decoding (1) can pattern-match against both the prompt and previous generations, (2) uses frequency counts to propose the most likely continuations, and (3) speculates an adaptive number of tokens for each request at each iteration to get better acceptance rates.
|
||||
|
||||
Suffix Decoding can achieve better performance for tasks with high repetition, such as code-editing, agentic loops (e.g. self-reflection, self-consistency), and RL rollouts.
|
||||
|
||||
!!! tip "Install Arctic Inference"
|
||||
Suffix Decoding requires [Arctic Inference](https://github.com/snowflakedb/ArcticInference). You can install it with `pip install arctic-inference`.
|
||||
|
||||
!!! tip "Suffix Decoding Speculative Tokens"
|
||||
Suffix Decoding will speculate a dynamic number of tokens for each request at each decoding step, so the `num_speculative_tokens` configuration specifies the *maximum* number of speculative tokens. It is suggested to use a high number such as `16` or `32` (default).
|
||||
|
||||
??? code
|
||||
|
||||
```python
|
||||
from vllm import LLM, SamplingParams
|
||||
|
||||
prompts = [
|
||||
"The future of AI is",
|
||||
]
|
||||
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
|
||||
|
||||
llm = LLM(
|
||||
model="facebook/opt-6.7b",
|
||||
tensor_parallel_size=1,
|
||||
speculative_config={
|
||||
"method": "suffix",
|
||||
"num_speculative_tokens": 32,
|
||||
},
|
||||
)
|
||||
outputs = llm.generate(prompts, sampling_params)
|
||||
|
||||
for output in outputs:
|
||||
prompt = output.prompt
|
||||
generated_text = output.outputs[0].text
|
||||
print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
|
||||
```
|
||||
|
||||
## Speculating using MLP speculators
|
||||
|
||||
The following code configures vLLM to use speculative decoding where proposals are generated by
|
||||
|
||||
@ -204,7 +204,7 @@ Note that you can use reasoning with any provided structured outputs feature. Th
|
||||
}
|
||||
},
|
||||
)
|
||||
print("reasoning_content: ", completion.choices[0].message.reasoning_content)
|
||||
print("reasoning: ", completion.choices[0].message.reasoning)
|
||||
print("content: ", completion.choices[0].message.content)
|
||||
```
|
||||
|
||||
|
||||
@ -407,7 +407,6 @@ Here is a summary of a plugin file:
|
||||
# the name list in register_module can be used
|
||||
# in --tool-call-parser. you can define as many
|
||||
# tool parsers as you want here.
|
||||
@ToolParserManager.register_module(["example"])
|
||||
class ExampleToolParser(ToolParser):
|
||||
def __init__(self, tokenizer: AnyTokenizer):
|
||||
super().__init__(tokenizer)
|
||||
@ -439,6 +438,12 @@ Here is a summary of a plugin file:
|
||||
return ExtractedToolCallInformation(tools_called=False,
|
||||
tool_calls=[],
|
||||
content=text)
|
||||
# register the tool parser to ToolParserManager
|
||||
ToolParserManager.register_lazy_module(
|
||||
name="example",
|
||||
module_path="vllm.entrypoints.openai.tool_parsers.example",
|
||||
class_name="ExampleToolParser",
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
|
||||
@ -2,4 +2,4 @@ nav:
|
||||
- README.md
|
||||
- gpu.md
|
||||
- cpu.md
|
||||
- google_tpu.md
|
||||
- TPU: https://docs.vllm.ai/projects/tpu/en/latest/getting_started/installation/
|
||||
|
||||
@ -11,7 +11,6 @@ vLLM supports the following hardware platforms:
|
||||
- [ARM AArch64](cpu.md#arm-aarch64)
|
||||
- [Apple silicon](cpu.md#apple-silicon)
|
||||
- [IBM Z (S390X)](cpu.md#ibm-z-s390x)
|
||||
- [Google TPU](google_tpu.md)
|
||||
|
||||
## Hardware Plugins
|
||||
|
||||
@ -20,6 +19,7 @@ The backends below live **outside** the main `vllm` repository and follow the
|
||||
|
||||
| Accelerator | PyPI / package | Repository |
|
||||
|-------------|----------------|------------|
|
||||
| Google TPU | `tpu-inference` | <https://github.com/vllm-project/tpu-inference> |
|
||||
| Ascend NPU | `vllm-ascend` | <https://github.com/vllm-project/vllm-ascend> |
|
||||
| Intel Gaudi (HPU) | N/A, install from source | <https://github.com/vllm-project/vllm-gaudi> |
|
||||
| MetaX MACA GPU | N/A, install from source | <https://github.com/MetaX-MACA/vLLM-metax> |
|
||||
|
||||
@ -94,7 +94,7 @@ Currently, there are no pre-built CPU wheels.
|
||||
## Related runtime environment variables
|
||||
|
||||
- `VLLM_CPU_KVCACHE_SPACE`: specify the KV cache size (e.g., `VLLM_CPU_KVCACHE_SPACE=40` means 40 GiB of space for the KV cache); a larger setting allows vLLM to run more requests in parallel. This parameter should be set based on the hardware configuration and memory management pattern of users. Default value is `0`.
|
||||
- `VLLM_CPU_OMP_THREADS_BIND`: specify the CPU cores dedicated to the OpenMP threads, can be set as CPU id lists or `auto` (by default). For example, `VLLM_CPU_OMP_THREADS_BIND=0-31` means there will be 32 OpenMP threads bound on 0-31 CPU cores. `VLLM_CPU_OMP_THREADS_BIND=0-31|32-63` means there will be 2 tensor parallel processes, 32 OpenMP threads of rank0 are bound on 0-31 CPU cores, and the OpenMP threads of rank1 are bound on 32-63 CPU cores. By setting to `auto`, the OpenMP threads of each rank are bound to the CPU cores in each NUMA node respectively.
|
||||
- `VLLM_CPU_OMP_THREADS_BIND`: specify the CPU cores dedicated to the OpenMP threads, can be set as CPU id lists, `auto` (by default), or `nobind` (to disable binding to individual CPU cores and to inherit user-defined OpenMP variables). For example, `VLLM_CPU_OMP_THREADS_BIND=0-31` means there will be 32 OpenMP threads bound on 0-31 CPU cores. `VLLM_CPU_OMP_THREADS_BIND=0-31|32-63` means there will be 2 tensor parallel processes, 32 OpenMP threads of rank0 are bound on 0-31 CPU cores, and the OpenMP threads of rank1 are bound on 32-63 CPU cores. By setting to `auto`, the OpenMP threads of each rank are bound to the CPU cores in each NUMA node respectively. If set to `nobind`, the number of OpenMP threads is determined by the standard `OMP_NUM_THREADS` environment variable.
|
||||
- `VLLM_CPU_NUM_OF_RESERVED_CPU`: specify the number of CPU cores which are not dedicated to the OpenMP threads for each rank. The variable only takes effect when `VLLM_CPU_OMP_THREADS_BIND` is set to `auto`. Default value is `None`. If the value is not set and `auto` thread binding is used, no CPU will be reserved when `world_size == 1`, and 1 CPU per rank will be reserved when `world_size > 1`.
|
||||
- `CPU_VISIBLE_MEMORY_NODES`: specify the visible NUMA memory nodes for vLLM CPU workers, similar to `CUDA_VISIBLE_DEVICES`. The variable only takes effect when `VLLM_CPU_OMP_THREADS_BIND` is set to `auto`. It provides more control over the auto thread-binding feature, such as masking nodes and changing the node binding order.
|
||||
- `VLLM_CPU_MOE_PREPACK` (x86 only): whether to use prepack for MoE layer. This will be passed to `ipex.llm.modules.GatedMLPMOE`. Default is `1` (True). On unsupported CPUs, you might need to set this to `0` (False).
|
||||
|
||||
@ -1,193 +0,0 @@
|
||||
# Google TPU
|
||||
|
||||
Tensor Processing Units (TPUs) are Google's custom-developed application-specific
|
||||
integrated circuits (ASICs) used to accelerate machine learning workloads. TPUs
|
||||
are available in different versions each with different hardware specifications.
|
||||
For more information about TPUs, see [TPU System Architecture](https://cloud.google.com/tpu/docs/system-architecture-tpu-vm).
|
||||
For more information on the TPU versions supported with vLLM, see:
|
||||
|
||||
- [TPU v6e](https://cloud.google.com/tpu/docs/v6e)
|
||||
- [TPU v5e](https://cloud.google.com/tpu/docs/v5e)
|
||||
- [TPU v5p](https://cloud.google.com/tpu/docs/v5p)
|
||||
- [TPU v4](https://cloud.google.com/tpu/docs/v4)
|
||||
|
||||
These TPU versions allow you to configure the physical arrangements of the TPU
|
||||
chips. This can improve throughput and networking performance. For more
|
||||
information see:
|
||||
|
||||
- [TPU v6e topologies](https://cloud.google.com/tpu/docs/v6e#configurations)
|
||||
- [TPU v5e topologies](https://cloud.google.com/tpu/docs/v5e#tpu-v5e-config)
|
||||
- [TPU v5p topologies](https://cloud.google.com/tpu/docs/v5p#tpu-v5p-config)
|
||||
- [TPU v4 topologies](https://cloud.google.com/tpu/docs/v4#tpu-v4-config)
|
||||
|
||||
In order for you to use Cloud TPUs you need to have TPU quota granted to your
|
||||
Google Cloud Platform project. TPU quotas specify how many TPUs you can use in a
|
||||
GCP project and are specified in terms of TPU version, the number of TPUs you
|
||||
want to use, and quota type. For more information, see [TPU quota](https://cloud.google.com/tpu/docs/quota#tpu_quota).
|
||||
|
||||
For TPU pricing information, see [Cloud TPU pricing](https://cloud.google.com/tpu/pricing).
|
||||
|
||||
You may need additional persistent storage for your TPU VMs. For more
|
||||
information, see [Storage options for Cloud TPU data](https://cloud.google.com/tpu/docs/storage-options).
|
||||
|
||||
!!! warning
|
||||
There are no pre-built wheels for this device, so you must either use the pre-built Docker image or build vLLM from source.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Google Cloud TPU VM
|
||||
- TPU versions: v6e, v5e, v5p, v4
|
||||
- Python: 3.11 or newer
|
||||
|
||||
### Provision Cloud TPUs
|
||||
|
||||
You can provision Cloud TPUs using the [Cloud TPU API](https://cloud.google.com/tpu/docs/reference/rest)
|
||||
or the [queued resources](https://cloud.google.com/tpu/docs/queued-resources)
|
||||
API (preferred). This section shows how to create TPUs using the queued resource API. For
|
||||
more information about using the Cloud TPU API, see [Create a Cloud TPU using the Create Node API](https://cloud.google.com/tpu/docs/managing-tpus-tpu-vm#create-node-api).
|
||||
Queued resources enable you to request Cloud TPU resources in a queued manner.
|
||||
When you request queued resources, the request is added to a queue maintained by
|
||||
the Cloud TPU service. When the requested resource becomes available, it's
|
||||
assigned to your Google Cloud project for your immediate exclusive use.
|
||||
|
||||
!!! note
|
||||
In all of the following commands, replace the ALL CAPS parameter names with
|
||||
appropriate values. See the parameter descriptions table for more information.
|
||||
|
||||
### Provision Cloud TPUs with GKE
|
||||
|
||||
For more information about using TPUs with GKE, see:
|
||||
|
||||
- [About TPUs in GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/tpus)
|
||||
- [Deploy TPU workloads in GKE Standard](https://cloud.google.com/kubernetes-engine/docs/how-to/tpus)
|
||||
- [Plan for TPUs in GKE](https://cloud.google.com/kubernetes-engine/docs/concepts/plan-tpus)
|
||||
|
||||
## Configure a new environment
|
||||
|
||||
### Provision a Cloud TPU with the queued resource API
|
||||
|
||||
Create a TPU v5e with 4 TPU chips:
|
||||
|
||||
```bash
|
||||
gcloud alpha compute tpus queued-resources create QUEUED_RESOURCE_ID \
|
||||
--node-id TPU_NAME \
|
||||
--project PROJECT_ID \
|
||||
--zone ZONE \
|
||||
--accelerator-type ACCELERATOR_TYPE \
|
||||
--runtime-version RUNTIME_VERSION \
|
||||
--service-account SERVICE_ACCOUNT
|
||||
```
|
||||
|
||||
| Parameter name | Description |
|
||||
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| QUEUED_RESOURCE_ID | The user-assigned ID of the queued resource request. |
|
||||
| TPU_NAME | The user-assigned name of the TPU which is created when the queued resource request is allocated. |
|
||||
| PROJECT_ID | Your Google Cloud project |
|
||||
| ZONE | The GCP zone where you want to create your Cloud TPU. The value you use depends on the version of TPUs you are using. For more information, see [TPU regions and zones] |
|
||||
| ACCELERATOR_TYPE | The TPU version you want to use. Specify the TPU version, for example `v5litepod-4` specifies a v5e TPU with 4 cores, `v6e-1` specifies a v6e TPU with 1 core. For more information, see [TPU versions]. |
|
||||
| RUNTIME_VERSION | The TPU VM runtime version to use. For example, use `v2-alpha-tpuv6e` for a VM loaded with one or more v6e TPU(s). |
|
||||
| SERVICE_ACCOUNT | The email address for your service account. You can find it in the IAM Cloud Console under *Service Accounts*. For example: `tpu-service-account@<your_project_ID>.iam.gserviceaccount.com` |
|
||||
|
||||
Connect to your TPU VM using SSH:
|
||||
|
||||
```bash
|
||||
gcloud compute tpus tpu-vm ssh TPU_NAME --project PROJECT_ID --zone ZONE
|
||||
```
|
||||
|
||||
!!! note
|
||||
When configuring `RUNTIME_VERSION` ("TPU software version") on GCP, ensure it matches the TPU generation you've selected by referencing the [TPU VM images] compatibility matrix. Using an incompatible version may prevent vLLM from running correctly.
|
||||
|
||||
[TPU versions]: https://cloud.google.com/tpu/docs/runtimes
|
||||
[TPU VM images]: https://cloud.google.com/tpu/docs/runtimes
|
||||
[TPU regions and zones]: https://cloud.google.com/tpu/docs/regions-zones
|
||||
|
||||
## Set up using Python
|
||||
|
||||
### Pre-built wheels
|
||||
|
||||
Currently, there are no pre-built TPU wheels.
|
||||
|
||||
### Build wheel from source
|
||||
|
||||
Install Miniconda:
|
||||
|
||||
```bash
|
||||
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
|
||||
bash Miniconda3-latest-Linux-x86_64.sh
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
Create and activate a Conda environment for vLLM:
|
||||
|
||||
```bash
|
||||
conda create -n vllm python=3.12 -y
|
||||
conda activate vllm
|
||||
```
|
||||
|
||||
Clone the vLLM repository and go to the vLLM directory:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/vllm-project/vllm.git && cd vllm
|
||||
```
|
||||
|
||||
Uninstall the existing `torch` and `torch_xla` packages:
|
||||
|
||||
```bash
|
||||
pip uninstall torch torch-xla -y
|
||||
```
|
||||
|
||||
Install build dependencies:
|
||||
|
||||
```bash
|
||||
pip install -r requirements/tpu.txt
|
||||
sudo apt-get install --no-install-recommends --yes libopenblas-base libopenmpi-dev libomp-dev
|
||||
```
|
||||
|
||||
Run the setup script:
|
||||
|
||||
```bash
|
||||
VLLM_TARGET_DEVICE="tpu" python -m pip install -e .
|
||||
```
|
||||
|
||||
## Set up using Docker
|
||||
|
||||
### Pre-built images
|
||||
|
||||
See [Using Docker](../../deployment/docker.md) for instructions on using the official Docker image, making sure to substitute the image name `vllm/vllm-openai` with `vllm/vllm-tpu`.
|
||||
|
||||
### Build image from source
|
||||
|
||||
You can use [docker/Dockerfile.tpu](../../../docker/Dockerfile.tpu) to build a Docker image with TPU support.
|
||||
|
||||
```bash
|
||||
docker build -f docker/Dockerfile.tpu -t vllm-tpu .
|
||||
```
|
||||
|
||||
Run the Docker image with the following command:
|
||||
|
||||
```bash
|
||||
# Make sure to add `--privileged --net host --shm-size=16G`.
|
||||
docker run --privileged --net host --shm-size=16G -it vllm-tpu
|
||||
```
|
||||
|
||||
!!! note
|
||||
Since TPU relies on XLA which requires static shapes, vLLM bucketizes the
|
||||
possible input shapes and compiles an XLA graph for each shape. The
|
||||
compilation time may take 20~30 minutes in the first run. However, the
|
||||
compilation time reduces to ~5 minutes afterwards because the XLA graphs are
|
||||
cached in the disk (in `VLLM_XLA_CACHE_PATH` or `~/.cache/vllm/xla_cache` by default).
|
||||
|
||||
!!! tip
|
||||
If you encounter the following error:
|
||||
|
||||
```console
|
||||
from torch._C import * # noqa: F403
|
||||
ImportError: libopenblas.so.0: cannot open shared object file: No such
|
||||
file or directory
|
||||
```
|
||||
|
||||
Install OpenBLAS with the following command:
|
||||
|
||||
```bash
|
||||
sudo apt-get install --no-install-recommends --yes libopenblas-base libopenmpi-dev libomp-dev
|
||||
```
|
||||
@ -11,9 +11,10 @@ vLLM supports AMD GPUs with ROCm 6.3 or above, and torch 2.8.0 and above.
|
||||
# --8<-- [end:installation]
|
||||
# --8<-- [start:requirements]
|
||||
|
||||
- GPU: MI200s (gfx90a), MI300 (gfx942), MI350 (gfx950), Radeon RX 7900 series (gfx1100/1101), Radeon RX 9000 series (gfx1200/1201)
|
||||
- GPU: MI200s (gfx90a), MI300 (gfx942), MI350 (gfx950), Radeon RX 7900 series (gfx1100/1101), Radeon RX 9000 series (gfx1200/1201), Ryzen AI MAX / AI 300 Series (gfx1151/1150)
|
||||
- ROCm 6.3 or above
|
||||
- MI350 requires ROCm 7.0 or above
|
||||
- Ryzen AI MAX / AI 300 Series requires ROCm 7.0.2 or above
|
||||
|
||||
# --8<-- [end:requirements]
|
||||
# --8<-- [start:set-up-using-python]
|
||||
|
||||
@ -63,6 +63,17 @@ This guide will help you quickly get started with vLLM to perform:
|
||||
rocm/vllm-dev:nightly
|
||||
```
|
||||
|
||||
=== "Google TPU"
|
||||
|
||||
To run vLLM on Google TPUs, you need to install the `vllm-tpu` package.
|
||||
|
||||
```bash
|
||||
uv pip install vllm-tpu
|
||||
```
|
||||
|
||||
!!! note
|
||||
For more detailed instructions, including Docker, installing from source, and troubleshooting, please refer to the [vLLM on TPU documentation](https://docs.vllm.ai/projects/tpu/en/latest/).
|
||||
|
||||
!!! note
|
||||
For more details, and for non-CUDA platforms, please refer to the [installation instructions](installation/README.md) for how to install vLLM on your platform.
|
||||
|
||||
|
||||
@ -3,6 +3,7 @@
|
||||
import importlib
|
||||
import logging
|
||||
import sys
|
||||
import traceback
|
||||
from argparse import SUPPRESS, HelpFormatter
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
@ -16,7 +17,30 @@ ROOT_DIR = Path(__file__).parent.parent.parent.parent
|
||||
ARGPARSE_DOC_DIR = ROOT_DIR / "docs/argparse"
|
||||
|
||||
sys.path.insert(0, str(ROOT_DIR))
|
||||
|
||||
|
||||
# Mock custom op code
|
||||
class MockCustomOp:
|
||||
@staticmethod
|
||||
def register(name):
|
||||
def decorator(cls):
|
||||
return cls
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
noop = lambda *a, **k: None
|
||||
sys.modules["vllm._C"] = MagicMock()
|
||||
sys.modules["vllm.model_executor.custom_op"] = MagicMock(CustomOp=MockCustomOp)
|
||||
sys.modules["vllm.utils.torch_utils"] = MagicMock(direct_register_custom_op=noop)
|
||||
|
||||
# Mock any version checks by reading from compiled CI requirements
|
||||
with open(ROOT_DIR / "requirements/test.txt") as f:
|
||||
VERSIONS = dict(line.strip().split("==") for line in f if "==" in line)
|
||||
importlib.metadata.version = lambda name: VERSIONS.get(name) or "0.0.0"
|
||||
|
||||
# Make torch.nn.Parameter safe to inherit from
|
||||
sys.modules["torch.nn"] = MagicMock(Parameter=object)
|
||||
|
||||
|
||||
class PydanticMagicMock(MagicMock):
|
||||
@ -31,20 +55,17 @@ class PydanticMagicMock(MagicMock):
|
||||
return core_schema.any_schema()
|
||||
|
||||
|
||||
def auto_mock(module, attr, max_mocks=50):
|
||||
def auto_mock(module, attr, max_mocks=100):
|
||||
"""Function that automatically mocks missing modules during imports."""
|
||||
logger.info("Importing %s from %s", attr, module)
|
||||
for _ in range(max_mocks):
|
||||
try:
|
||||
# First treat attr as an attr, then as a submodule
|
||||
with patch("importlib.metadata.version", return_value="0.0.0"):
|
||||
return getattr(
|
||||
importlib.import_module(module),
|
||||
attr,
|
||||
importlib.import_module(f"{module}.{attr}"),
|
||||
)
|
||||
except importlib.metadata.PackageNotFoundError as e:
|
||||
raise e
|
||||
return getattr(
|
||||
importlib.import_module(module),
|
||||
attr,
|
||||
importlib.import_module(f"{module}.{attr}"),
|
||||
)
|
||||
except ModuleNotFoundError as e:
|
||||
logger.info("Mocking %s for argparse doc generation", e.name)
|
||||
sys.modules[e.name] = PydanticMagicMock(name=e.name)
|
||||
@ -139,10 +160,19 @@ def create_parser(add_cli_args, **kwargs) -> FlexibleArgumentParser:
|
||||
Returns:
|
||||
FlexibleArgumentParser: A parser with markdown formatting for the class.
|
||||
"""
|
||||
parser = FlexibleArgumentParser(add_json_tip=False)
|
||||
parser.formatter_class = MarkdownFormatter
|
||||
with patch("vllm.config.DeviceConfig.__post_init__"):
|
||||
_parser = add_cli_args(parser, **kwargs)
|
||||
try:
|
||||
parser = FlexibleArgumentParser(add_json_tip=False)
|
||||
parser.formatter_class = MarkdownFormatter
|
||||
with patch("vllm.config.DeviceConfig.__post_init__"):
|
||||
_parser = add_cli_args(parser, **kwargs)
|
||||
except ModuleNotFoundError as e:
|
||||
# Auto-mock runtime imports
|
||||
if tb_list := traceback.extract_tb(e.__traceback__):
|
||||
path = Path(tb_list[-1].filename).relative_to(ROOT_DIR)
|
||||
auto_mock(module=".".join(path.parent.parts), attr=path.stem)
|
||||
return create_parser(add_cli_args, **kwargs)
|
||||
else:
|
||||
raise e
|
||||
# add_cli_args might be in-place so return parser if _parser is None
|
||||
return _parser or parser
|
||||
|
||||
@ -184,3 +214,7 @@ def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool):
|
||||
with open(doc_path, "w", encoding="utf-8") as f:
|
||||
f.write(super(type(parser), parser).format_help())
|
||||
logger.info("Argparse generated: %s", doc_path.relative_to(ROOT_DIR))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
on_startup("build", False)
|
||||
|
||||
@ -404,6 +404,8 @@ th {
|
||||
| `OPTForCausalLM` | OPT, OPT-IML | `facebook/opt-66b`, `facebook/opt-iml-max-30b`, etc. | ✅︎ | ✅︎ |
|
||||
| `OrionForCausalLM` | Orion | `OrionStarAI/Orion-14B-Base`, `OrionStarAI/Orion-14B-Chat`, etc. | | ✅︎ |
|
||||
| `OuroForCausalLM` | ouro | `ByteDance/Ouro-1.4B`, `ByteDance/Ouro-2.6B`, etc. | ✅︎ | |
|
||||
| `PanguEmbeddedForCausalLM` |openPangu-Embedded-7B | `FreedomIntelligence/openPangu-Embedded-7B-V1.1` | ✅︎ | ✅︎ |
|
||||
| `PanguUltraMoEForCausalLM` |openpangu-ultra-moe-718b-model | `FreedomIntelligence/openPangu-Ultra-MoE-718B-V1.1` | ✅︎ | ✅︎ |
|
||||
| `PhiForCausalLM` | Phi | `microsoft/phi-1_5`, `microsoft/phi-2`, etc. | ✅︎ | ✅︎ |
|
||||
| `Phi3ForCausalLM` | Phi-4, Phi-3 | `microsoft/Phi-4-mini-instruct`, `microsoft/Phi-4`, `microsoft/Phi-3-mini-4k-instruct`, `microsoft/Phi-3-mini-128k-instruct`, `microsoft/Phi-3-medium-128k-instruct`, etc. | ✅︎ | ✅︎ |
|
||||
| `PhiMoEForCausalLM` | Phi-3.5-MoE | `microsoft/Phi-3.5-MoE-instruct`, etc. | ✅︎ | ✅︎ |
|
||||
@ -675,6 +677,7 @@ These models primarily accept the [`LLM.generate`](./generative_models.md#llmgen
|
||||
| `NVLM_D_Model` | NVLM-D 1.0 | T + I<sup>+</sup> | `nvidia/NVLM-D-72B`, etc. | | ✅︎ |
|
||||
| `Ovis` | Ovis2, Ovis1.6 | T + I<sup>+</sup> | `AIDC-AI/Ovis2-1B`, `AIDC-AI/Ovis1.6-Llama3.2-3B`, etc. | | ✅︎ |
|
||||
| `Ovis2_5` | Ovis2.5 | T + I<sup>+</sup> + V | `AIDC-AI/Ovis2.5-9B`, etc. | | |
|
||||
| `PaddleOCRVLForConditionalGeneration` | Paddle-OCR | T + I<sup>+</sup> | `PaddlePaddle/PaddleOCR-VL`, etc. | | |
|
||||
| `PaliGemmaForConditionalGeneration` | PaliGemma, PaliGemma 2 | T + I<sup>E</sup> | `google/paligemma-3b-pt-224`, `google/paligemma-3b-mix-224`, `google/paligemma2-3b-ft-docci-448`, etc. | | ✅︎ |
|
||||
| `Phi3VForCausalLM` | Phi-3-Vision, Phi-3.5-Vision | T + I<sup>E+</sup> | `microsoft/Phi-3-vision-128k-instruct`, `microsoft/Phi-3.5-vision-instruct`, etc. | | ✅︎ |
|
||||
| `Phi4MMForCausalLM` | Phi-4-multimodal | T + I<sup>+</sup> / T + A<sup>+</sup> / I<sup>+</sup> + A<sup>+</sup> | `microsoft/Phi-4-multimodal-instruct`, etc. | ✅︎ | ✅︎ |
|
||||
@ -758,6 +761,7 @@ Speech2Text models trained specifically for Automatic Speech Recognition.
|
||||
| `WhisperForConditionalGeneration` | Whisper | `openai/whisper-small`, `openai/whisper-large-v3-turbo`, etc. | | |
|
||||
| `VoxtralForConditionalGeneration` | Voxtral (Mistral format) | `mistralai/Voxtral-Mini-3B-2507`, `mistralai/Voxtral-Small-24B-2507`, etc. | ✅︎ | ✅︎ |
|
||||
| `Gemma3nForConditionalGeneration` | Gemma3n | `google/gemma-3n-E2B-it`, `google/gemma-3n-E4B-it`, etc. | | |
|
||||
| `GraniteSpeechForConditionalGeneration` | Granite Speech | `ibm-granite/granite-speech-3.3-2b`, `ibm-granite/granite-speech-3.3-8b`, etc. | ✅︎ | ✅︎ |
|
||||
|
||||
### Pooling Models
|
||||
|
||||
|
||||
@ -316,6 +316,10 @@ Traceback (most recent call last):
|
||||
|
||||
This indicates vLLM failed to initialize the NCCL communicator, possibly due to a missing `IPC_LOCK` Linux capability or an unmounted `/dev/shm`. Refer to [Enabling GPUDirect RDMA](../serving/parallelism_scaling.md#enabling-gpudirect-rdma) for guidance on properly configuring the environment for GPUDirect RDMA.
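As a rough illustration only (the linked guide is authoritative for the exact settings), a Docker-based deployment typically grants the capability and shared memory along these lines:

```bash
# grant the IPC_LOCK capability and provide enough shared memory;
# image, size, and model name are placeholders
docker run --gpus all --cap-add=IPC_LOCK --shm-size=16g \
    vllm/vllm-openai --model meta-llama/Llama-3.1-8B-Instruct
```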
|
||||
|
||||
## CUDA error: the provided PTX was compiled with an unsupported toolchain
|
||||
|
||||
If you see an error like `RuntimeError: CUDA error: the provided PTX was compiled with an unsupported toolchain.`, it means that the CUDA PTX in vLLM's wheels was compiled with a toolchain unsupported by your system. The released vLLM wheels have to be compiled with a specific version of the CUDA toolkit, and the compiled code might fail to run on lower versions of the CUDA driver. Read [CUDA compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/) for more details. The solution is to install the `cuda-compat` package from your package manager. For example, on Ubuntu, you can run `sudo apt-get install cuda-compat-12-9` and then add `export LD_LIBRARY_PATH=/usr/local/cuda-12.9/compat:$LD_LIBRARY_PATH` to your `.bashrc` file. When it is successfully installed, the output of `nvidia-smi` should show `CUDA Version: 12.9`. Note that CUDA 12.9 is used as an example here; you may need to install a higher version of the cuda-compat package if vLLM's default CUDA version is higher.
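Collected from the paragraph above, the fix looks like this on Ubuntu (adjust the version to match your setup):

```bash
sudo apt-get install cuda-compat-12-9
echo 'export LD_LIBRARY_PATH=/usr/local/cuda-12.9/compat:$LD_LIBRARY_PATH' >> ~/.bashrc
source ~/.bashrc
nvidia-smi   # should now report "CUDA Version: 12.9"
```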
|
||||
|
||||
## Known Issues
|
||||
|
||||
- In `v0.5.2`, `v0.5.3`, and `v0.5.3.post1`, there is a bug caused by [zmq](https://github.com/zeromq/pyzmq/issues/2000) , which can occasionally cause vLLM to hang depending on the machine configuration. The solution is to upgrade to the latest version of `vllm` to include the [fix](https://github.com/vllm-project/vllm/pull/6759).
|
||||
|
||||
@ -6,8 +6,6 @@
|
||||
|
||||
V1 is now enabled by default for all supported use cases, and we will gradually enable it for every use case we plan to support. Please share any feedback on [GitHub](https://github.com/vllm-project/vllm) or in the [vLLM Slack](https://inviter.co/vllm-slack).
|
||||
|
||||
To disable V1, please set the environment variable as: `VLLM_USE_V1=0`, and send us a GitHub issue sharing the reason!
|
||||
|
||||
## Why vLLM V1?
|
||||
|
||||
vLLM V0 successfully supported a wide range of models and hardware, but as new features were developed independently, the system grew increasingly complex. This complexity made it harder to integrate new capabilities and introduced technical debt, revealing the need for a more streamlined and unified design.
|
||||
|
||||
@ -11,7 +11,7 @@ python save_sharded_state.py \
|
||||
--model /path/to/load \
|
||||
--quantization deepspeedfp \
|
||||
--tensor-parallel-size 8 \
|
||||
--output /path/to/save/sharded/modele
|
||||
--output /path/to/save/sharded/model
|
||||
|
||||
python load_sharded_state.py \
|
||||
--model /path/to/saved/sharded/model \
|
||||
|
||||
@ -33,6 +33,8 @@ Output: ' in the hands of the people.\n\nThe future of AI is in the'
|
||||
------------------------------------------------------------
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
import torch
|
||||
|
||||
from vllm import LLM, SamplingParams
|
||||
@ -48,6 +50,16 @@ from vllm.v1.sample.logits_processor.builtin import process_dict_updates
|
||||
class DummyLogitsProcessor(LogitsProcessor):
|
||||
"""Fake logit processor to support unit testing and examples"""
|
||||
|
||||
@classmethod
|
||||
def validate_params(cls, params: SamplingParams):
|
||||
target_token: Any | None = params.extra_args and params.extra_args.get(
|
||||
"target_token"
|
||||
)
|
||||
if target_token is not None and not isinstance(target_token, int):
|
||||
raise ValueError(
|
||||
f"target_token value {target_token} {type(target_token)} is not int"
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self, vllm_config: VllmConfig, device: torch.device, is_pin_memory: bool
|
||||
):
|
||||
@ -57,14 +69,17 @@ class DummyLogitsProcessor(LogitsProcessor):
|
||||
return False
|
||||
|
||||
def update_state(self, batch_update: BatchUpdate | None):
|
||||
def extract_extra_arg(params: SamplingParams) -> int | None:
|
||||
self.validate_params(params)
|
||||
return params.extra_args and params.extra_args.get("target_token")
|
||||
|
||||
process_dict_updates(
|
||||
self.req_info,
|
||||
batch_update,
|
||||
# This function returns the LP's per-request state based on the
|
||||
# request details, or None if this LP does not apply to the
|
||||
# request.
|
||||
lambda params, _, __: params.extra_args
|
||||
and (params.extra_args.get("target_token")),
|
||||
lambda params, _, __: extract_extra_arg(params),
|
||||
)
|
||||
|
||||
def apply(self, logits: torch.Tensor) -> torch.Tensor:
|
||||
|
||||
@ -76,6 +76,14 @@ class WrappedPerReqLogitsProcessor(AdapterLogitsProcessor):
|
||||
"""Example of wrapping a fake request-level logit processor to create a
|
||||
batch-level logits processor"""
|
||||
|
||||
@classmethod
|
||||
def validate_params(cls, params: SamplingParams):
|
||||
target_token: Any | None = params.extra_args and params.extra_args.get(
|
||||
"target_token"
|
||||
)
|
||||
if target_token is not None and not isinstance(target_token, int):
|
||||
raise ValueError(f"target_token value {target_token} is not int")
|
||||
|
||||
def is_argmax_invariant(self) -> bool:
|
||||
return False
|
||||
|
||||
@ -101,13 +109,6 @@ class WrappedPerReqLogitsProcessor(AdapterLogitsProcessor):
|
||||
)
|
||||
if target_token is None:
|
||||
return None
|
||||
if not isinstance(target_token, int):
|
||||
logger.warning(
|
||||
"target_token value %s is not int; not applying logits"
|
||||
" processor to request.",
|
||||
target_token,
|
||||
)
|
||||
return None
|
||||
return DummyPerReqLogitsProcessor(target_token)
|
||||
|
||||
|
||||
|
||||
@@ -77,6 +77,14 @@ class WrappedPerReqLogitsProcessor(AdapterLogitsProcessor):
    """Example of overriding the wrapper class `__init__()` in order to utilize
    info about the device type"""

    @classmethod
    def validate_params(cls, params: SamplingParams):
        target_token = params.extra_args and params.extra_args.get("target_token")
        if target_token is not None and not isinstance(target_token, int):
            raise ValueError(
                f"`target_token` has to be an integer, got {target_token}."
            )

    def __init__(
        self, vllm_config: VllmConfig, device: torch.device, is_pin_memory: bool
    ):
@@ -113,13 +121,6 @@ class WrappedPerReqLogitsProcessor(AdapterLogitsProcessor):
            is None
        ):
            return None
        if not isinstance(target_token, int):
            logger.warning(
                "target_token value %s is not int; not applying logits"
                " processor to request.",
                target_token,
            )
            return None
        return DummyPerReqLogitsProcessor(target_token)
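The three hunks above all funnel per-request configuration through `SamplingParams.extra_args["target_token"]` and fail fast in `validate_params`. A minimal sketch of how such a processor is typically wired up is shown below; the model name and the `logits_processors=` engine argument are illustrative assumptions and may differ across vLLM versions.

```python
# Hedged sketch (not part of the diff): register the example processor and opt a
# request in through extra_args. The model choice and the logits_processors
# keyword are assumptions for illustration.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m", logits_processors=[DummyLogitsProcessor])

# validate_params() raises if "target_token" is present but not an int;
# requests that omit it are simply ignored by the processor.
params = SamplingParams(max_tokens=8, extra_args={"target_token": 42})
print(llm.generate(["The future of AI is"], params)[0].outputs[0].text)
```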
@@ -16,18 +16,18 @@ except ImportError:

QUESTION = "What is the content of each image?"
IMAGE_URLS = [
    "https://upload.wikimedia.org/wikipedia/commons/d/da/2015_Kaczka_krzy%C5%BCowka_w_wodzie_%28samiec%29.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/7/77/002_The_lion_king_Snyggve_in_the_Serengeti_National_Park_Photo_by_Giles_Laurent.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/2/26/Ultramarine_Flycatcher_%28Ficedula_superciliaris%29_Naggar%2C_Himachal_Pradesh%2C_2013_%28cropped%29.JPG",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/e/e5/Anim1754_-_Flickr_-_NOAA_Photo_Library_%281%29.jpg/2560px-Anim1754_-_Flickr_-_NOAA_Photo_Library_%281%29.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/d/d4/Starfish%2C_Caswell_Bay_-_geograph.org.uk_-_409413.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/6/69/Grapevinesnail_01.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0b/Texas_invasive_Musk_Thistle_1.jpg/1920px-Texas_invasive_Musk_Thistle_1.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/7/7a/Huskiesatrest.jpg/2880px-Huskiesatrest.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Orange_tabby_cat_sitting_on_fallen_leaves-Hisashi-01A.jpg/1920px-Orange_tabby_cat_sitting_on_fallen_leaves-Hisashi-01A.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/3/30/George_the_amazing_guinea_pig.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1f/Oryctolagus_cuniculus_Rcdo.jpg/1920px-Oryctolagus_cuniculus_Rcdo.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/9/98/Horse-and-pony.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/duck.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/lion.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/flycatcher.jpeg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/somefish.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/starfish.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/snail.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/thistle.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/husky.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/orangetabbycat.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/guineapig.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/rabbit.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/horsepony.jpg",
]
@@ -1242,6 +1242,32 @@ def run_ovis2_5(questions: list[str], modality: str) -> ModelRequestData:
    )


# PaddleOCR-VL
def run_paddleocr_vl(questions: list[str], modality: str) -> ModelRequestData:
    assert modality == "image"

    model_name = "PaddlePaddle/PaddleOCR-VL"

    engine_args = EngineArgs(
        model=model_name,
        max_model_len=4096,
        max_num_seqs=2,
        limit_mm_per_prompt={modality: 1},
        trust_remote_code=True,
    )

    placeholder = "<|IMAGE_START|><|IMAGE_PLACEHOLDER|><|IMAGE_END|>"
    prompts = [
        (f"<|begin_of_sentence|>User: {question}{placeholder}\nAssistant: ")
        for question in questions
    ]

    return ModelRequestData(
        engine_args=engine_args,
        prompts=prompts,
    )


# PaliGemma
def run_paligemma(questions: list[str], modality: str) -> ModelRequestData:
    assert modality == "image"
@@ -1817,6 +1843,7 @@ model_example_map = {
    "NVLM_D": run_nvlm_d,
    "ovis": run_ovis,
    "ovis2_5": run_ovis2_5,
    "paddleocr_vl": run_paddleocr_vl,
    "paligemma": run_paligemma,
    "paligemma2": run_paligemma2,
    "phi3_v": run_phi3v,
@@ -22,18 +22,18 @@ from vllm.utils.argparse_utils import FlexibleArgumentParser

QUESTION = "What is the content of each image?"
IMAGE_URLS = [
    "https://upload.wikimedia.org/wikipedia/commons/d/da/2015_Kaczka_krzy%C5%BCowka_w_wodzie_%28samiec%29.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/7/77/002_The_lion_king_Snyggve_in_the_Serengeti_National_Park_Photo_by_Giles_Laurent.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/2/26/Ultramarine_Flycatcher_%28Ficedula_superciliaris%29_Naggar%2C_Himachal_Pradesh%2C_2013_%28cropped%29.JPG",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/e/e5/Anim1754_-_Flickr_-_NOAA_Photo_Library_%281%29.jpg/2560px-Anim1754_-_Flickr_-_NOAA_Photo_Library_%281%29.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/d/d4/Starfish%2C_Caswell_Bay_-_geograph.org.uk_-_409413.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/6/69/Grapevinesnail_01.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0b/Texas_invasive_Musk_Thistle_1.jpg/1920px-Texas_invasive_Musk_Thistle_1.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/7/7a/Huskiesatrest.jpg/2880px-Huskiesatrest.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Orange_tabby_cat_sitting_on_fallen_leaves-Hisashi-01A.jpg/1920px-Orange_tabby_cat_sitting_on_fallen_leaves-Hisashi-01A.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/3/30/George_the_amazing_guinea_pig.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1f/Oryctolagus_cuniculus_Rcdo.jpg/1920px-Oryctolagus_cuniculus_Rcdo.jpg",
    "https://upload.wikimedia.org/wikipedia/commons/9/98/Horse-and-pony.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/duck.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/lion.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/flycatcher.jpeg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/somefish.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/starfish.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/snail.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/thistle.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/husky.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/orangetabbycat.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/guineapig.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/rabbit.jpg",
    "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/horsepony.jpg",
]
@@ -801,6 +801,27 @@ def load_ovis2_5(question: str, image_urls: list[str]) -> ModelRequestData:
    )


def load_paddleocr_vl(question: str, image_urls: list[str]) -> ModelRequestData:
    model_name = "PaddlePaddle/PaddleOCR-VL"

    engine_args = EngineArgs(
        model=model_name,
        trust_remote_code=True,
        max_model_len=8192,
        max_num_seqs=2,
        limit_mm_per_prompt={"image": len(image_urls)},
    )

    placeholders = "<|IMAGE_START|><|IMAGE_PLACEHOLDER|><|IMAGE_END|>" * len(image_urls)
    prompt = f"<|begin_of_sentence|>User: {question}{placeholders}\nAssistant: "

    return ModelRequestData(
        engine_args=engine_args,
        prompt=prompt,
        image_data=[fetch_image(url) for url in image_urls],
    )


def load_pixtral_hf(question: str, image_urls: list[str]) -> ModelRequestData:
    model_name = "mistral-community/pixtral-12b"

@@ -1312,6 +1333,7 @@ model_example_map = {
    "NVLM_D": load_nvlm_d,
    "ovis": load_ovis,
    "ovis2_5": load_ovis2_5,
    "paddleocr_vl": load_paddleocr_vl,
    "phi3_v": load_phi3v,
    "phi4_mm": load_phi4mm,
    "phi4_multimodal": load_phi4_multimodal,
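Outside the example harness, the same PaddleOCR-VL settings map onto a direct offline call roughly as follows. The placeholder tokens and engine arguments mirror the diff above; the demo image, question text, and exact generation settings are illustrative assumptions.

```python
# Hedged sketch (not part of the diff): run PaddlePaddle/PaddleOCR-VL directly,
# reusing the engine arguments and prompt format from the new example functions.
from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset  # bundled demo image, used for convenience

llm = LLM(
    model="PaddlePaddle/PaddleOCR-VL",
    trust_remote_code=True,
    max_model_len=4096,
    max_num_seqs=2,
    limit_mm_per_prompt={"image": 1},
)
placeholder = "<|IMAGE_START|><|IMAGE_PLACEHOLDER|><|IMAGE_END|>"
prompt = f"<|begin_of_sentence|>User: What is the content of this image?{placeholder}\nAssistant: "
outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"image": ImageAsset("stop_sign").pil_image}},
    SamplingParams(max_tokens=64),
)
print(outputs[0].outputs[0].text)
```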
@@ -19,3 +19,15 @@ This directory contains a Helm chart for deploying the vllm application. The cha
- templates/pvc.yaml: Template for Persistent Volume Claims.
- templates/secrets.yaml: Template for Kubernetes Secrets.
- templates/service.yaml: Template for creating Services.

## Running Tests

This chart includes unit tests using [helm-unittest](https://github.com/helm-unittest/helm-unittest). Install the plugin and run tests:

```bash
# Install plugin
helm plugin install https://github.com/helm-unittest/helm-unittest

# Run tests
helm unittest .
```
|
||||
@ -123,9 +123,6 @@ runAsUser:
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "chart.extraInitImage" -}}
|
||||
"amazon/aws-cli:2.6.4"
|
||||
{{- end }}
|
||||
|
||||
{{- define "chart.extraInitEnv" -}}
|
||||
- name: S3_ENDPOINT_URL
|
||||
@ -148,11 +145,15 @@ runAsUser:
|
||||
secretKeyRef:
|
||||
name: {{ .Release.Name }}-secrets
|
||||
key: s3accesskey
|
||||
{{- if .Values.extraInit.s3modelpath }}
|
||||
- name: S3_PATH
|
||||
value: "{{ .Values.extraInit.s3modelpath }}"
|
||||
{{- end }}
|
||||
{{- if hasKey .Values.extraInit "awsEc2MetadataDisabled" }}
|
||||
- name: AWS_EC2_METADATA_DISABLED
|
||||
value: "{{ .Values.extraInit.awsEc2MetadataDisabled }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Define chart labels
|
||||
|
||||
@@ -72,16 +72,21 @@ spec:
        {{ toYaml . | nindent 8 }}
      {{- end }}

      {{- if .Values.extraInit }}
      {{- if and .Values.extraInit (or .Values.extraInit.modelDownload.enabled .Values.extraInit.initContainers) }}
      initContainers:
      {{- if .Values.extraInit.modelDownload.enabled }}
      - name: wait-download-model
        image: {{ include "chart.extraInitImage" . }}
        command:
        - /bin/bash
        image: {{ .Values.extraInit.modelDownload.image.repository }}:{{ .Values.extraInit.modelDownload.image.tag }}
        imagePullPolicy: {{ .Values.extraInit.modelDownload.image.pullPolicy }}
        command: {{ .Values.extraInit.modelDownload.waitContainer.command | toJson }}
        args:
        - -eucx
        - while aws --endpoint-url $S3_ENDPOINT_URL s3 sync --dryrun s3://$S3_BUCKET_NAME/$S3_PATH /data | grep -q download; do sleep 10; done
        env: {{- include "chart.extraInitEnv" . | nindent 10 }}
        {{- toYaml .Values.extraInit.modelDownload.waitContainer.args | nindent 10 }}
        env:
        {{- if .Values.extraInit.modelDownload.waitContainer.env }}
        {{- toYaml .Values.extraInit.modelDownload.waitContainer.env | nindent 10 }}
        {{- else }}
        {{- include "chart.extraInitEnv" . | nindent 10 }}
        {{- end }}
        resources:
          requests:
            cpu: 200m
@@ -93,6 +98,10 @@ spec:
        - name: {{ .Release.Name }}-storage
          mountPath: /data
      {{- end }}
      {{- with .Values.extraInit.initContainers }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
      {{- end }}
      volumes:
      - name: {{ .Release.Name }}-storage
        persistentVolumeClaim:
@@ -1,4 +1,4 @@
{{- if .Values.extraInit }}
{{- if and .Values.extraInit .Values.extraInit.modelDownload.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
@@ -12,13 +12,17 @@ spec:
    spec:
      containers:
      - name: job-download-model
        image: {{ include "chart.extraInitImage" . }}
        command:
        - /bin/bash
        image: {{ .Values.extraInit.modelDownload.image.repository }}:{{ .Values.extraInit.modelDownload.image.tag }}
        imagePullPolicy: {{ .Values.extraInit.modelDownload.image.pullPolicy }}
        command: {{ .Values.extraInit.modelDownload.downloadJob.command | toJson }}
        args:
        - -eucx
        - aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data
        env: {{- include "chart.extraInitEnv" . | nindent 8 }}
        {{- toYaml .Values.extraInit.modelDownload.downloadJob.args | nindent 8 }}
        env:
        {{- if .Values.extraInit.modelDownload.downloadJob.env }}
        {{- toYaml .Values.extraInit.modelDownload.downloadJob.env | nindent 8 }}
        {{- else }}
        {{- include "chart.extraInitEnv" . | nindent 8 }}
        {{- end }}
        volumeMounts:
        - name: {{ .Release.Name }}-storage
          mountPath: /data
examples/online_serving/chart-helm/tests/deployment_test.yaml (new file, 135 lines)
@@ -0,0 +1,135 @@
suite: test deployment
templates:
  - deployment.yaml
tests:
  - it: should create wait-download-model init container when modelDownload is enabled
    set:
      extraInit:
        modelDownload:
          enabled: true
          image:
            repository: "amazon/aws-cli"
            tag: "2.6.4"
            pullPolicy: "IfNotPresent"
          waitContainer:
            command: [ "/bin/bash" ]
            args:
              - "-eucx"
              - "while aws --endpoint-url $S3_ENDPOINT_URL s3 sync --dryrun s3://$S3_BUCKET_NAME/$S3_PATH /data | grep -q download; do sleep 10; done"
          downloadJob:
            command: [ "/bin/bash" ]
            args:
              - "-eucx"
              - "aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data"
        initContainers: [ ]
        pvcStorage: "1Gi"
        s3modelpath: "relative_s3_model_path/opt-125m"
        awsEc2MetadataDisabled: true
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Deployment
      - isNotEmpty:
          path: spec.template.spec.initContainers
      - equal:
          path: spec.template.spec.initContainers[0].name
          value: wait-download-model
      - equal:
          path: spec.template.spec.initContainers[0].image
          value: amazon/aws-cli:2.6.4
      - equal:
          path: spec.template.spec.initContainers[0].imagePullPolicy
          value: IfNotPresent

  - it: should only create custom init containers when modelDownload is disabled
    set:
      extraInit:
        modelDownload:
          enabled: false
          image:
            repository: "amazon/aws-cli"
            tag: "2.6.4"
            pullPolicy: "IfNotPresent"
          waitContainer:
            command: [ "/bin/bash" ]
            args: [ "-c", "echo test" ]
          downloadJob:
            command: [ "/bin/bash" ]
            args: [ "-c", "echo test" ]
        initContainers:
          - name: llm-d-routing-proxy
            image: ghcr.io/llm-d/llm-d-routing-sidecar:v0.2.0
            imagePullPolicy: IfNotPresent
            ports:
              - containerPort: 8080
                name: proxy
        pvcStorage: "10Gi"
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Deployment
      - lengthEqual:
          path: spec.template.spec.initContainers
          count: 1
      - equal:
          path: spec.template.spec.initContainers[0].name
          value: llm-d-routing-proxy
      - equal:
          path: spec.template.spec.initContainers[0].image
          value: ghcr.io/llm-d/llm-d-routing-sidecar:v0.2.0
      - equal:
          path: spec.template.spec.initContainers[0].ports[0].containerPort
          value: 8080

  - it: should create both wait-download-model and custom init containers when both are enabled
    set:
      extraInit:
        modelDownload:
          enabled: true
          image:
            repository: "amazon/aws-cli"
            tag: "2.6.4"
            pullPolicy: "IfNotPresent"
          waitContainer:
            command: [ "/bin/bash" ]
            args:
              - "-eucx"
              - "while aws --endpoint-url $S3_ENDPOINT_URL s3 sync --dryrun s3://$S3_BUCKET_NAME/$S3_PATH /data | grep -q download; do sleep 10; done"
          downloadJob:
            command: [ "/bin/bash" ]
            args:
              - "-eucx"
              - "aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data"
        initContainers:
          - name: llm-d-routing-proxy
            image: ghcr.io/llm-d/llm-d-routing-sidecar:v0.2.0
            imagePullPolicy: IfNotPresent
            ports:
              - containerPort: 8080
                name: proxy
        pvcStorage: "10Gi"
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Deployment
      - lengthEqual:
          path: spec.template.spec.initContainers
          count: 2
      - equal:
          path: spec.template.spec.initContainers[0].name
          value: wait-download-model
      - equal:
          path: spec.template.spec.initContainers[0].image
          value: amazon/aws-cli:2.6.4
      - equal:
          path: spec.template.spec.initContainers[1].name
          value: llm-d-routing-proxy
      - equal:
          path: spec.template.spec.initContainers[1].image
          value: ghcr.io/llm-d/llm-d-routing-sidecar:v0.2.0
      - equal:
          path: spec.template.spec.initContainers[1].ports[0].containerPort
          value: 8080
examples/online_serving/chart-helm/tests/job_test.yaml (new file, 61 lines)
@@ -0,0 +1,61 @@
suite: test job
templates:
  - job.yaml
tests:
  - it: should create job when modelDownload is enabled
    set:
      extraInit:
        modelDownload:
          enabled: true
          image:
            repository: "amazon/aws-cli"
            tag: "2.6.4"
            pullPolicy: "IfNotPresent"
          waitContainer:
            command: [ "/bin/bash" ]
            args: [ "-c", "wait" ]
          downloadJob:
            command: [ "/bin/bash" ]
            args:
              - "-eucx"
              - "aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data"
        pvcStorage: "1Gi"
        s3modelpath: "relative_s3_model_path/opt-125m"
        awsEc2MetadataDisabled: true
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: Job
      - equal:
          path: spec.template.spec.containers[0].name
          value: job-download-model
      - equal:
          path: spec.template.spec.containers[0].image
          value: amazon/aws-cli:2.6.4
      - equal:
          path: spec.template.spec.restartPolicy
          value: OnFailure

  - it: should not create job when modelDownload is disabled
    set:
      extraInit:
        modelDownload:
          enabled: false
          image:
            repository: "amazon/aws-cli"
            tag: "2.6.4"
            pullPolicy: "IfNotPresent"
          waitContainer:
            command: [ "/bin/bash" ]
            args: [ "-c", "wait" ]
          downloadJob:
            command: [ "/bin/bash" ]
            args: [ "-c", "download" ]
        initContainers:
          - name: llm-d-routing-proxy
            image: ghcr.io/llm-d/llm-d-routing-sidecar:v0.2.0
        pvcStorage: "10Gi"
    asserts:
      - hasDocuments:
          count: 0
examples/online_serving/chart-helm/tests/pvc_test.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
suite: test pvc
templates:
  - pvc.yaml
tests:
  # Test Case: PVC Created When extraInit Defined
  - it: should create pvc when extraInit is defined
    set:
      extraInit:
        modelDownload:
          enabled: true
          image:
            repository: "amazon/aws-cli"
            tag: "2.6.4"
            pullPolicy: "IfNotPresent"
          waitContainer:
            command: ["/bin/bash"]
            args: ["-c", "wait"]
          downloadJob:
            command: ["/bin/bash"]
            args: ["-c", "download"]
        pvcStorage: "10Gi"
    asserts:
      - hasDocuments:
          count: 1
      - isKind:
          of: PersistentVolumeClaim
      - equal:
          path: spec.accessModes[0]
          value: ReadWriteOnce
      - equal:
          path: spec.resources.requests.storage
          value: 10Gi
@@ -136,6 +136,70 @@
    "extraInit": {
      "type": "object",
      "properties": {
        "modelDownload": {
          "type": "object",
          "properties": {
            "enabled": {
              "type": "boolean"
            },
            "image": {
              "type": "object",
              "properties": {
                "repository": {
                  "type": "string"
                },
                "tag": {
                  "type": "string"
                },
                "pullPolicy": {
                  "type": "string"
                }
              },
              "required": ["repository", "tag", "pullPolicy"]
            },
            "waitContainer": {
              "type": "object",
              "properties": {
                "command": {
                  "type": "array",
                  "items": {"type": "string"}
                },
                "args": {
                  "type": "array",
                  "items": {"type": "string"}
                },
                "env": {
                  "type": "array",
                  "items": {"type": "object"}
                }
              },
              "required": ["command", "args"]
            },
            "downloadJob": {
              "type": "object",
              "properties": {
                "command": {
                  "type": "array",
                  "items": {"type": "string"}
                },
                "args": {
                  "type": "array",
                  "items": {"type": "string"}
                },
                "env": {
                  "type": "array",
                  "items": {"type": "object"}
                }
              },
              "required": ["command", "args"]
            }
          },
          "required": ["enabled", "image", "waitContainer", "downloadJob"]
        },
        "initContainers": {
          "type": "array",
          "items": {"type": "object"}
        },
        "s3modelpath": {
          "type": "string"
        },
@@ -147,9 +211,9 @@
        }
      },
      "required": [
        "pvcStorage",
        "s3modelpath",
        "awsEc2MetadataDisabled"
        "modelDownload",
        "initContainers",
        "pvcStorage"
      ]
    },
    "extraContainers": {
@@ -75,10 +75,65 @@ maxUnavailablePodDisruptionBudget: ""

# -- Additional configuration for the init container
extraInit:
  # -- Path of the model on the s3 which hosts model weights and config files
  # -- Model download functionality (optional)
  modelDownload:
    # -- Enable model download job and wait container
    enabled: true
    # -- Image configuration for model download operations
    image:
      # -- Image repository
      repository: "amazon/aws-cli"
      # -- Image tag
      tag: "2.6.4"
      # -- Image pull policy
      pullPolicy: "IfNotPresent"
    # -- Wait container configuration (init container that waits for model to be ready)
    waitContainer:
      # -- Command to execute
      command: ["/bin/bash"]
      # -- Arguments for the wait container
      args:
        - "-eucx"
        - "while aws --endpoint-url $S3_ENDPOINT_URL s3 sync --dryrun s3://$S3_BUCKET_NAME/$S3_PATH /data | grep -q download; do sleep 10; done"
      # -- Environment variables (optional, overrides S3 defaults entirely if specified)
      # env:
      #   - name: HUGGING_FACE_HUB_TOKEN
      #     value: "your-token"
      #   - name: MODEL_ID
      #     value: "meta-llama/Llama-2-7b"
    # -- Download job configuration (job that actually downloads the model)
    downloadJob:
      # -- Command to execute
      command: ["/bin/bash"]
      # -- Arguments for the download job
      args:
        - "-eucx"
        - "aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data"
      # -- Environment variables (optional, overrides S3 defaults entirely if specified)
      # env:
      #   - name: HUGGING_FACE_HUB_TOKEN
      #     value: "your-token"
      #   - name: MODEL_ID
      #     value: "meta-llama/Llama-2-7b"

  # -- Custom init containers (appended after wait-download-model if modelDownload is enabled)
  initContainers: []
  # Example for llm-d sidecar:
  # initContainers:
  #   - name: llm-d-routing-proxy
  #     image: ghcr.io/llm-d/llm-d-routing-sidecar:v0.2.0
  #     imagePullPolicy: IfNotPresent
  #     ports:
  #       - containerPort: 8080
  #         name: proxy
  #     securityContext:
  #       runAsUser: 1000

  # -- Path of the model on the s3 which hosts model weights and config files
  s3modelpath: "relative_s3_model_path/opt-125m"
  # -- Storage size of the s3
  # -- Storage size for the PVC
  pvcStorage: "1Gi"
  # -- Disable AWS EC2 metadata service
  awsEc2MetadataDisabled: true

# -- Additional containers configuration
@@ -112,8 +112,8 @@ def run_single_image(model: str, max_completion_tokens: int) -> None:

# Multi-image input inference
def run_multi_image(model: str, max_completion_tokens: int) -> None:
    image_url_duck = "https://upload.wikimedia.org/wikipedia/commons/d/da/2015_Kaczka_krzy%C5%BCowka_w_wodzie_%28samiec%29.jpg"
    image_url_lion = "https://upload.wikimedia.org/wikipedia/commons/7/77/002_The_lion_king_Snyggve_in_the_Serengeti_National_Park_Photo_by_Giles_Laurent.jpg"
    image_url_duck = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/duck.jpg"
    image_url_lion = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/lion.jpg"
    chat_completion_from_url = client.chat.completions.create(
        messages=[
            {
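For reference, the request that this function builds continues roughly as follows; the question text and printing step are illustrative assumptions, while the two `image_url` parts reuse the variables from the hunk above.

```python
# Hedged sketch (not part of the diff): shape of a two-image chat request
# against an OpenAI-compatible vLLM server.
chat_completion_from_url = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What are the animals in these images?"},
                {"type": "image_url", "image_url": {"url": image_url_duck}},
                {"type": "image_url", "image_url": {"url": image_url_lion}},
            ],
        }
    ],
    model=model,
    max_completion_tokens=max_completion_tokens,
)
print(chat_completion_from_url.choices[0].message.content)
```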
@@ -2,7 +2,7 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
An example demonstrates how to use tool calling with reasoning models
like QwQ-32B. The reasoning_content will not be parsed by the tool
like QwQ-32B. The reasoning will not be parsed by the tool
calling process; only the final output will be parsed.

To run this example, you need to start the vLLM server with both
@@ -78,7 +78,7 @@ messages = [


def extract_reasoning_and_calls(chunks: list):
    reasoning_content = ""
    reasoning = ""
    tool_call_idx = -1
    arguments = []
    function_names = []
@@ -97,9 +97,9 @@ def extract_reasoning_and_calls(chunks: list):
            if tool_call.function.arguments:
                arguments[tool_call_idx] += tool_call.function.arguments
        else:
            if hasattr(chunk.choices[0].delta, "reasoning_content"):
                reasoning_content += chunk.choices[0].delta.reasoning_content
    return reasoning_content, arguments, function_names
            if hasattr(chunk.choices[0].delta, "reasoning"):
                reasoning += chunk.choices[0].delta.reasoning
    return reasoning, arguments, function_names


def main():
@@ -115,7 +115,7 @@ def main():
    tool_calls = client.chat.completions.create(
        messages=messages, model=model, tools=tools
    )
    print(f"reasoning_content: {tool_calls.choices[0].message.reasoning_content}")
    print(f"reasoning: {tool_calls.choices[0].message.reasoning}")
    print(f"function name: {tool_calls.choices[0].message.tool_calls[0].function.name}")
    print(
        f"function arguments: "
@@ -129,9 +129,9 @@ def main():

    chunks = list(tool_calls_stream)

    reasoning_content, arguments, function_names = extract_reasoning_and_calls(chunks)
    reasoning, arguments, function_names = extract_reasoning_and_calls(chunks)

    print(f"reasoning_content: {reasoning_content}")
    print(f"reasoning: {reasoning}")
    print(f"function name: {function_names[0]}")
    print(f"function arguments: {arguments[0]}")

@@ -144,7 +144,7 @@ def main():
    )

    tool_call = tool_calls.choices[0].message.tool_calls[0].function
    print(f"reasoning_content: {tool_calls.choices[0].message.reasoning_content}")
    print(f"reasoning: {tool_calls.choices[0].message.reasoning}")
    print(f"function name: {tool_call.name}")
    print(f"function arguments: {tool_call.arguments}")
    print("----------Stream Generate With Named Function Calling--------------")
@@ -159,8 +159,8 @@ def main():

    chunks = list(tool_calls_stream)

    reasoning_content, arguments, function_names = extract_reasoning_and_calls(chunks)
    print(f"reasoning_content: {reasoning_content}")
    reasoning, arguments, function_names = extract_reasoning_and_calls(chunks)
    print(f"reasoning: {reasoning}")
    print(f"function name: {function_names[0]}")
    print(f"function arguments: {arguments[0]}")
    print("\n\n")
@@ -38,10 +38,10 @@ def main():
    # For granite, add: `extra_body={"chat_template_kwargs": {"thinking": True}}`
    response = client.chat.completions.create(model=model, messages=messages)

    reasoning_content = response.choices[0].message.reasoning_content
    reasoning = response.choices[0].message.reasoning
    content = response.choices[0].message.content

    print("reasoning_content for Round 1:", reasoning_content)
    print("reasoning for Round 1:", reasoning)
    print("content for Round 1:", content)

    # Round 2
@@ -54,10 +54,10 @@ def main():
    )
    response = client.chat.completions.create(model=model, messages=messages)

    reasoning_content = response.choices[0].message.reasoning_content
    reasoning = response.choices[0].message.reasoning
    content = response.choices[0].message.content

    print("reasoning_content for Round 2:", reasoning_content)
    print("reasoning for Round 2:", reasoning)
    print("content for Round 2:", content)
@@ -20,7 +20,7 @@ in real-time as they are generated by the model. This is useful for scenarios
where you want to display chat completions to the user as they are generated
by the model.

Remember to check content and reasoning_content exist in `ChatCompletionChunk`,
Remember to check content and reasoning exist in `ChatCompletionChunk`,
content may not exist leading to errors if you try to access it.
"""

@@ -47,22 +47,20 @@ def main():
    stream = client.chat.completions.create(model=model, messages=messages, stream=True)

    print("client: Start streaming chat completions...")
    printed_reasoning_content = False
    printed_reasoning = False
    printed_content = False

    for chunk in stream:
        # Safely extract reasoning_content and content from delta,
        # Safely extract reasoning and content from delta,
        # defaulting to None if attributes don't exist or are empty strings
        reasoning_content = (
            getattr(chunk.choices[0].delta, "reasoning_content", None) or None
        )
        reasoning = getattr(chunk.choices[0].delta, "reasoning", None) or None
        content = getattr(chunk.choices[0].delta, "content", None) or None

        if reasoning_content is not None:
            if not printed_reasoning_content:
                printed_reasoning_content = True
                print("reasoning_content:", end="", flush=True)
            print(reasoning_content, end="", flush=True)
        if reasoning is not None:
            if not printed_reasoning:
                printed_reasoning = True
                print("reasoning:", end="", flush=True)
            print(reasoning, end="", flush=True)
        elif content is not None:
            if not printed_content:
                printed_content = True
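Since these examples switch from `reasoning_content` to `reasoning`, client code that must work against both older and newer servers can probe for whichever field is present. The helper below is a hedged sketch, not part of the diff.

```python
# Hedged sketch (not part of the diff): tolerate both the old and new field names
# when reading reasoning output from a chat completion message or stream delta.
def get_reasoning(obj) -> str | None:
    # Prefer the new "reasoning" attribute, fall back to "reasoning_content".
    return getattr(obj, "reasoning", None) or getattr(obj, "reasoning_content", None)


# Usage with a streamed chunk or a full response message:
#   text = get_reasoning(chunk.choices[0].delta)
#   text = get_reasoning(response.choices[0].message)
```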
@@ -0,0 +1,83 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Set up this example by starting a vLLM OpenAI-compatible server with tool call
options enabled.
Reasoning models can be used through the Responses API as seen here
https://platform.openai.com/docs/api-reference/responses
For example:
vllm serve Qwen/Qwen3-1.7B --reasoning-parser qwen3 \
    --structured-outputs-config.backend xgrammar \
    --enable-auto-tool-choice --tool-call-parser hermes
"""

import json

from openai import OpenAI
from utils import get_first_model


def get_weather(latitude: float, longitude: float) -> str:
    """
    Mock function to simulate getting weather data.
    In a real application, this would call an external weather API.
    """
    return f"Current temperature at ({latitude}, {longitude}) is 20°C."


tools = [
    {
        "type": "function",
        "name": "get_weather",
        "description": "Get current temperature for provided coordinates in celsius.",
        "parameters": {
            "type": "object",
            "properties": {
                "latitude": {"type": "number"},
                "longitude": {"type": "number"},
            },
            "required": ["latitude", "longitude"],
            "additionalProperties": False,
        },
        "strict": True,
    }
]

input_messages = [
    {"role": "user", "content": "What's the weather like in Paris today?"}
]


def main():
    base_url = "http://0.0.0.0:8000/v1"
    client = OpenAI(base_url=base_url, api_key="empty")
    model = get_first_model(client)
    response = client.responses.create(
        model=model, input=input_messages, tools=tools, tool_choice="required"
    )

    for out in response.output:
        if out.type == "function_call":
            print("Function call:", out.name, out.arguments)
            tool_call = out
            args = json.loads(tool_call.arguments)
            result = get_weather(args["latitude"], args["longitude"])

            input_messages.append(tool_call)  # append model's function call message
            input_messages.append(
                {  # append result message
                    "type": "function_call_output",
                    "call_id": tool_call.call_id,
                    "output": str(result),
                }
            )
            response_2 = client.responses.create(
                model=model,
                input=input_messages,
                tools=tools,
            )
            print(response_2.output_text)


if __name__ == "__main__":
    main()
Some files were not shown because too many files have changed in this diff.