Mirror of https://github.com/vllm-project/vllm.git (synced 2025-10-20 23:03:52 +08:00)

Compare commits: submission ... v0.5.2 (1821 commits)
Author | SHA1 | Date
(Commit table: SHA1 hashes only, from 4cf256ae7f through a96d63c21d; the Author and Date columns are empty in this view.)
36 .buildkite/check-wheel-size.py Normal file
@@ -0,0 +1,36 @@
import os
import zipfile

MAX_SIZE_MB = 200


def print_top_10_largest_files(zip_file):
    with zipfile.ZipFile(zip_file, 'r') as z:
        file_sizes = [(f, z.getinfo(f).file_size) for f in z.namelist()]
        file_sizes.sort(key=lambda x: x[1], reverse=True)
        for f, size in file_sizes[:10]:
            print(f"{f}: {size/(1024*1024)} MBs uncompressed.")


def check_wheel_size(directory):
    for root, _, files in os.walk(directory):
        for f in files:
            if f.endswith(".whl"):
                wheel_path = os.path.join(root, f)
                wheel_size = os.path.getsize(wheel_path)
                wheel_size_mb = wheel_size / (1024 * 1024)
                if wheel_size_mb > MAX_SIZE_MB:
                    print(
                        f"Wheel {wheel_path} is too large ({wheel_size_mb} MB) "
                        f"compare to the allowed size ({MAX_SIZE_MB} MB).")
                    print_top_10_largest_files(wheel_path)
                    return 1
                else:
                    print(f"Wheel {wheel_path} is within the allowed size "
                          f"({wheel_size_mb} MB).")
    return 0


if __name__ == "__main__":
    import sys
    sys.exit(check_wheel_size(sys.argv[1]))
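The check above is driven entirely by its exit code (0 if all wheels fit under the 200 MB cap, 1 otherwise). As a hypothetical local invocation, not part of this diff, it could be wrapped like the following sketch; the `dist` directory is an assumed location for the built wheels.

```python
# Hypothetical local run of the wheel-size check; "dist" is an assumed path.
import subprocess
import sys

result = subprocess.run(
    [sys.executable, ".buildkite/check-wheel-size.py", "dist"])
if result.returncode != 0:
    print("A wheel exceeds the 200 MB limit; see the size report above.")
```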
14 .buildkite/download-images.sh Normal file
@@ -0,0 +1,14 @@
#!/bin/bash

set -ex
set -o pipefail

(which wget && which curl) || (apt-get update && apt-get install -y wget curl)

# aws s3 sync s3://air-example-data-2/vllm_opensource_llava/ images/
mkdir -p images
cd images
wget https://air-example-data-2.s3.us-west-2.amazonaws.com/vllm_opensource_llava/stop_sign.jpg
wget https://air-example-data-2.s3.us-west-2.amazonaws.com/vllm_opensource_llava/cherry_blossom.jpg

cd -
@@ -0,0 +1,11 @@
# bash ./run-lm-eval-gsm-vllm-baseline.sh -m deepseek-ai/DeepSeek-V2-Lite-Chat -b "auto" -l 1000 -f 5 -t 2
model_name: "deepseek-ai/DeepSeek-V2-Lite-Chat"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.671
  - name: "exact_match,flexible-extract"
    value: 0.664
limit: 1000
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh -m meta-llama/Meta-Llama-3-70B-Instruct -b 32 -l 250 -f 5
model_name: "meta-llama/Meta-Llama-3-70B-Instruct"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.892
  - name: "exact_match,flexible-extract"
    value: 0.892
limit: 250
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test -b 32 -l 1000 -f 5 -t 1
model_name: "nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.755
  - name: "exact_match,flexible-extract"
    value: 0.755
limit: 1000
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m neuralmagic/Meta-Llama-3-8B-Instruct-FP8 -b 32 -l 250 -f 5 -t 1
model_name: "neuralmagic/Meta-Llama-3-8B-Instruct-FP8"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.753
  - name: "exact_match,flexible-extract"
    value: 0.753
limit: 1000
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Meta-Llama-3-8B-Instruct-W8-Channel-A8-Dynamic-Per-Token-Test -b "auto" -l 250 -f 5 -t 1
model_name: "nm-testing/Meta-Llama-3-8B-Instruct-W8-Channel-A8-Dynamic-Per-Token-Test"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.728
  - name: "exact_match,flexible-extract"
    value: 0.728
limit: 250
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh -m meta-llama/Meta-Llama-3-8B-Instruct -b 32 -l 250 -f 5 -t 1
model_name: "meta-llama/Meta-Llama-3-8B-Instruct"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.756
  - name: "exact_match,flexible-extract"
    value: 0.752
limit: 250
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash ./run-lm-eval-gsm-vllm-baseline.sh -m neuralmagic/Mixtral-8x22B-Instruct-v0.1-FP8-dynamic -b "auto" -l 250 -f 5 -t 8
model_name: "neuralmagic/Mixtral-8x22B-Instruct-v0.1-FP8-dynamic"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.86
  - name: "exact_match,flexible-extract"
    value: 0.86
limit: 250
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash ./run-lm-eval-gsm-vllm-baseline.sh -m neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8 -b "auto" -l 250 -f 5 -t 4
model_name: "neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.624
  - name: "exact_match,flexible-extract"
    value: 0.624
limit: 250
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh -m neuralmagic/Mixtral-8x7B-Instruct-v0.1 -b 32 -l 250 -f 5 -t 4
model_name: "mistralai/Mixtral-8x7B-Instruct-v0.1"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.616
  - name: "exact_match,flexible-extract"
    value: 0.632
limit: 250
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m neuralmagic/Qwen2-1.5B-Instruct-quantized.w8a8 -b "auto" -l 1000 -f 5 -t 1
model_name: "neuralmagic/Qwen2-1.5B-Instruct-quantized.w8a8"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.593
  - name: "exact_match,flexible-extract"
    value: 0.588
limit: 1000
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise -b "auto" -l 1000 -f 5 -t 1
model_name: "nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.595
  - name: "exact_match,flexible-extract"
    value: 0.582
limit: 1000
num_fewshot: 5

@@ -0,0 +1,11 @@
# bash ./run-lm-eval-gsm-vllm-baseline.sh -m Qwen/Qwen2-57B-A14B-Instruct -b "auto" -l 250 -f 5 -t 4
model_name: "Qwen/Qwen2-57B-A14B-Instruct"
tasks:
- name: "gsm8k"
  metrics:
  - name: "exact_match,strict-match"
    value: 0.792
  - name: "exact_match,flexible-extract"
    value: 0.824
limit: 250
num_fewshot: 5
4 .buildkite/lm-eval-harness/configs/models-large.txt Normal file
@@ -0,0 +1,4 @@
Meta-Llama-3-70B-Instruct.yaml
Mixtral-8x7B-Instruct-v0.1.yaml
Qwen2-57B-A14-Instruct.yaml
DeepSeek-V2-Lite-Chat.yaml
5 .buildkite/lm-eval-harness/configs/models-small.txt Normal file
@@ -0,0 +1,5 @@
Meta-Llama-3-8B-Instruct.yaml
Meta-Llama-3-8B-Instruct-FP8.yaml
Meta-Llama-3-8B-Instruct-FP8-compressed-tensors.yaml
Meta-Llama-3-8B-Instruct-INT8-compressed-tensors.yaml
Qwen2-1.5B-Instruct-INT8-compressed-tensors.yaml
46 .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh Normal file
@@ -0,0 +1,46 @@
#!/bin/bash
# We can use this script to compute baseline accuracy on GSM for transformers.
#
# Make sure you have lm-eval-harness installed:
#   pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@9516087b81a61d0e220b22cc1b75be76de23bc10

usage() {
    echo``
    echo "Runs lm eval harness on GSM8k using huggingface transformers."
    echo "This pathway is intended to be used to create baselines for "
    echo "our automated nm-test-accuracy workflow"
    echo
    echo "usage: ${0} <options>"
    echo
    echo " -m - huggingface stub or local directory of the model"
    echo " -b - batch size to run the evaluation at"
    echo " -l - limit number of samples to run"
    echo " -f - number of fewshot samples to use"
    echo
}

while getopts "m:b:l:f:" OPT; do
  case ${OPT} in
    m )
        MODEL="$OPTARG"
        ;;
    b )
        BATCH_SIZE="$OPTARG"
        ;;
    l )
        LIMIT="$OPTARG"
        ;;
    f )
        FEWSHOT="$OPTARG"
        ;;
    \? )
        usage
        exit 1
        ;;
  esac
done

lm_eval --model hf \
  --model_args pretrained=$MODEL,parallelize=True \
  --tasks gsm8k --num_fewshot $FEWSHOT --limit $LIMIT \
  --batch_size $BATCH_SIZE
51 .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh Normal file
@@ -0,0 +1,51 @@
#!/bin/bash
# We can use this script to compute baseline accuracy on GSM for vllm.
# We use this for fp8, which HF does not support.
#
# Make sure you have lm-eval-harness installed:
#   pip install lm-eval==0.4.3

usage() {
    echo``
    echo "Runs lm eval harness on GSM8k using huggingface transformers."
    echo "This pathway is intended to be used to create baselines for "
    echo "our automated nm-test-accuracy workflow"
    echo
    echo "usage: ${0} <options>"
    echo
    echo " -m - huggingface stub or local directory of the model"
    echo " -b - batch size to run the evaluation at"
    echo " -l - limit number of samples to run"
    echo " -f - number of fewshot samples to use"
    echo " -t - tensor parallel size to run at"
    echo
}

while getopts "m:b:l:f:t:" OPT; do
  case ${OPT} in
    m )
        MODEL="$OPTARG"
        ;;
    b )
        BATCH_SIZE="$OPTARG"
        ;;
    l )
        LIMIT="$OPTARG"
        ;;
    f )
        FEWSHOT="$OPTARG"
        ;;
    t )
        TP_SIZE="$OPTARG"
        ;;
    \? )
        usage
        exit 1
        ;;
  esac
done

lm_eval --model vllm \
  --model_args pretrained=$MODEL,tensor_parallel_size=$TP_SIZE,add_bos_token=true,distributed_executor_backend="ray",trust_remote_code=true,max_model_len=4096 \
  --tasks gsm8k --num_fewshot $FEWSHOT --limit $LIMIT \
  --batch_size $BATCH_SIZE
59 .buildkite/lm-eval-harness/run-tests.sh Normal file
@@ -0,0 +1,59 @@
#!/bin/bash

usage() {
    echo``
    echo "Runs lm eval harness on GSM8k using vllm and compares to "
    echo "precomputed baseline (measured by HF transformers.)"
    echo
    echo "usage: ${0} <options>"
    echo
    echo " -c - path to the test data config (e.g. configs/small-models.txt)"
    echo " -t - tensor parallel size"
    echo
}

SUCCESS=0

while getopts "c:t:" OPT; do
  case ${OPT} in
    c )
        CONFIG="$OPTARG"
        ;;
    t )
        TP_SIZE="$OPTARG"
        ;;
    \? )
        usage
        exit 1
        ;;
  esac
done

# Parse list of configs.
IFS=$'\n' read -d '' -r -a MODEL_CONFIGS < $CONFIG

for MODEL_CONFIG in "${MODEL_CONFIGS[@]}"
do
    LOCAL_SUCCESS=0

    echo "=== RUNNING MODEL: $MODEL_CONFIG WITH TP SIZE: $TP_SIZE==="

    export LM_EVAL_TEST_DATA_FILE=$PWD/configs/${MODEL_CONFIG}
    export LM_EVAL_TP_SIZE=$TP_SIZE
    pytest -s test_lm_eval_correctness.py || LOCAL_SUCCESS=$?

    if [[ $LOCAL_SUCCESS == 0 ]]; then
        echo "=== PASSED MODEL: ${MODEL_CONFIG} ==="
    else
        echo "=== FAILED MODEL: ${MODEL_CONFIG} ==="
    fi

    SUCCESS=$((SUCCESS + LOCAL_SUCCESS))

done

if [ "${SUCCESS}" -eq "0" ]; then
    exit 0
else
    exit 1
fi
55 .buildkite/lm-eval-harness/test_lm_eval_correctness.py Normal file
@@ -0,0 +1,55 @@
"""
LM eval harness on model to compare vs HF baseline computed offline.
Configs are found in configs/$MODEL.yaml

* export LM_EVAL_TEST_DATA_FILE=configs/Meta-Llama-3-70B-Instruct.yaml
* export LM_EVAL_TP_SIZE=4
* pytest -s test_lm_eval_correctness.py
"""

import os
from pathlib import Path

import lm_eval
import numpy
import yaml

RTOL = 0.02
TEST_DATA_FILE = os.environ.get(
    "LM_EVAL_TEST_DATA_FILE",
    ".buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct.yaml")

TP_SIZE = os.environ.get("LM_EVAL_TP_SIZE", 1)


def launch_lm_eval(eval_config):
    model_args = f"pretrained={eval_config['model_name']}," \
                 f"tensor_parallel_size={TP_SIZE}," \
                 f"add_bos_token=true"

    results = lm_eval.simple_evaluate(
        model="vllm",
        model_args=model_args,
        tasks=[task["name"] for task in eval_config["tasks"]],
        num_fewshot=eval_config["num_fewshot"],
        limit=eval_config["limit"],
        batch_size="auto")

    return results


def test_lm_eval_correctness():
    eval_config = yaml.safe_load(
        Path(TEST_DATA_FILE).read_text(encoding="utf-8"))

    # Launch eval requests.
    results = launch_lm_eval(eval_config)

    # Confirm scores match ground truth.
    for task in eval_config["tasks"]:
        for metric in task["metrics"]:
            ground_truth = metric["value"]
            measured_value = results["results"][task["name"]][metric["name"]]
            print(f'{task["name"]} | {metric["name"]}: '
                  f'ground_truth={ground_truth} | measured={measured_value}')
            assert numpy.isclose(ground_truth, measured_value, rtol=RTOL)
104 .buildkite/nightly-benchmarks/README.md Normal file
@@ -0,0 +1,104 @@
# vLLM benchmark suite

## Introduction

This directory contains the performance benchmarking CI for vllm.
The goal is to help developers know the impact of their PRs on the performance of vllm.

This benchmark will be *triggered* upon:
- A PR being merged into vllm.
- Every commit for PRs with the `perf-benchmarks` label.

**Benchmarking Coverage**: latency, throughput and fixed-QPS serving on A100 (support for more GPUs is coming later), with different models.

**Benchmarking Duration**: about 1hr.

**For benchmarking developers**: please try to constrain the benchmarking duration to less than 1.5 hr so that it won't take forever to run.

## Configuring the workload

The benchmarking workload contains three parts:
- Latency tests in `latency-tests.json`.
- Throughput tests in `throughput-tests.json`.
- Serving tests in `serving-tests.json`.

See [descriptions.md](tests/descriptions.md) for detailed descriptions.

### Latency test

Here is an example of one test inside `latency-tests.json`:

```json
[
    {
        "test_name": "latency_llama8B_tp1",
        "parameters": {
            "model": "meta-llama/Meta-Llama-3-8B",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "num_iters_warmup": 5,
            "num_iters": 15
        }
    },
]
```

In this example:
- The `test_name` attribute is a unique identifier for the test. In `latency-tests.json`, it must start with `latency_`.
- The `parameters` attribute controls the command line arguments used for `benchmark_latency.py`. Use an underscore `_` instead of a dash `-` when specifying the arguments; `run-benchmarks-suite.sh` converts the underscores to dashes when feeding the arguments to `benchmark_latency.py`. For example, the corresponding command line arguments for `benchmark_latency.py` will be `--model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1 --load-format dummy --num-iters-warmup 5 --num-iters 15` (a Python sketch of this conversion follows at the end of this section).

Note that the performance numbers are highly sensitive to the values of the parameters. Please make sure the parameters are set correctly.

WARNING: The benchmarking script saves JSON results by itself, so please do not configure the `--output-json` parameter in the JSON file.
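As a rough illustration of the underscore-to-dash conversion described above, here is a minimal Python sketch. The real conversion in `run-benchmarks-suite.sh` is done in shell, not Python; this only shows the mapping, using the example parameters from the JSON above. The empty-string convention for bare flags comes from the serving example further below.

```python
# Minimal sketch: turn a "parameters" dict into benchmark_latency.py flags.
# Keys use underscores in the JSON; flags use dashes on the command line.
def params_to_cli_args(parameters: dict) -> str:
    parts = []
    for key, value in parameters.items():
        flag = "--" + key.replace("_", "-")
        # An empty string means a bare flag (e.g. "disable_log_stats": "").
        parts.append(flag if value == "" else f"{flag} {value}")
    return " ".join(parts)

print(params_to_cli_args({
    "model": "meta-llama/Meta-Llama-3-8B",
    "tensor_parallel_size": 1,
    "load_format": "dummy",
    "num_iters_warmup": 5,
    "num_iters": 15,
}))
# --model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1 --load-format dummy --num-iters-warmup 5 --num-iters 15
```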
### Throughput test
The tests are specified in `throughput-tests.json`. The syntax is similar to `latency-tests.json`, except that the parameters are fed to `benchmark_throughput.py`.

The measured numbers from this test are fairly stable, so even a slight change in the reported value can reflect a real performance difference.

### Serving test
We test online serving throughput by using `benchmark_serving.py`, including request rate = inf, to cover the online serving overhead. The corresponding parameters are in `serving-tests.json`, and here is an example:

```
[
    {
        "test_name": "serving_llama8B_tp1_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_parameters": {
            "model": "meta-llama/Meta-Llama-3-8B",
            "tensor_parallel_size": 1,
            "swap_space": 16,
            "disable_log_stats": "",
            "disable_log_requests": "",
            "load_format": "dummy"
        },
        "client_parameters": {
            "model": "meta-llama/Meta-Llama-3-8B",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
]
```

Inside this example:
- The `test_name` attribute is also a unique identifier for the test. It must start with `serving_`.
- The `server_parameters` attribute contains the command line arguments for the vLLM server.
- The `client_parameters` attribute contains the command line arguments for `benchmark_serving.py`.
- The `qps_list` attribute controls the list of QPS values for the test. It is used to configure the `--request-rate` parameter in `benchmark_serving.py` (see the sketch at the end of this section).

The measured numbers from this test are less stable than the latency and throughput benchmarks (due to randomized ShareGPT dataset sampling inside `benchmark_serving.py`), but a large change (e.g. 5%) still indicates a real difference.

WARNING: The benchmarking script saves JSON results by itself, so please do not configure `--save-results` or other results-saving-related parameters in `serving-tests.json`.
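To make the role of `qps_list` concrete, here is a minimal sketch (not the CI's actual driver) of how each QPS value could expand into one `benchmark_serving.py` invocation via `--request-rate`; the string "inf" is passed through unchanged, and the remaining client parameters are omitted for brevity.

```python
# Sketch only: one benchmark_serving.py run per entry in qps_list.
def request_rate_sweep(qps_list: list) -> list:
    return [
        f"python3 benchmark_serving.py --backend vllm --request-rate {qps}"
        for qps in qps_list
    ]

print(request_rate_sweep([1, 4, 16, "inf"]))
```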
## Visualizing the results
The `convert-results-json-to-markdown.py` script puts the benchmarking results into a markdown table by formatting [descriptions.md](tests/descriptions.md) with the real benchmarking results.
You can find the result presented as a table inside the `buildkite/performance-benchmark` job page.
If you do not see the table, please wait until the benchmark finishes running.
The JSON version of the table (together with the JSON version of the benchmark results) is also attached to the markdown file.
The raw benchmarking results (as JSON files) are in the `Artifacts` tab of the benchmarking job.
61 .buildkite/nightly-benchmarks/benchmark-pipeline.yaml Normal file
@@ -0,0 +1,61 @@
steps:
  - label: "Wait for container to be ready"
    agents:
      queue: A100
    plugins:
    - kubernetes:
        podSpec:
          containers:
          - image: badouralix/curl-jq
            command:
            - sh
            - .buildkite/nightly-benchmarks/scripts/wait-for-image.sh
  - wait
  - label: "A100"
    agents:
      queue: A100
    plugins:
    - kubernetes:
        podSpec:
          priorityClassName: perf-benchmark
          containers:
          - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
            command:
            - bash .buildkite/nightly-benchmarks/run-benchmarks-suite.sh
            resources:
              limits:
                nvidia.com/gpu: 8
            volumeMounts:
            - name: devshm
              mountPath: /dev/shm
            env:
            - name: VLLM_USAGE_SOURCE
              value: ci-test
            - name: HF_TOKEN
              valueFrom:
                secretKeyRef:
                  name: hf-token-secret
                  key: token
          nodeSelector:
            nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
          volumes:
          - name: devshm
            emptyDir:
              medium: Memory
  - label: "H100"
    agents:
      queue: H100
    plugins:
      - docker#v5.11.0:
          image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
          command:
          - bash
          - .buildkite/nightly-benchmarks/run-benchmarks-suite.sh
          mount-buildkite-agent: true
          propagate-environment: true
          ipc: host
          gpus: all
          environment:
          - VLLM_USAGE_SOURCE
          - HF_TOKEN
45 .buildkite/nightly-benchmarks/nightly-descriptions.md Normal file
@@ -0,0 +1,45 @@
# Nightly benchmark

The main goal of this benchmarking is two-fold:
- Performance clarity: provide clarity on which engine (vllm, tensorrt-llm, lmdeploy or tgi) leads in performance for which workload.
- Reproducibility: one can run the exact same set of benchmarking commands inside the exact same docker image by following the reproducing instructions in [reproduce.md]().

## Docker images

We benchmark vllm, tensorrt-llm, lmdeploy and tgi using the following docker images:
- vllm/vllm-openai:v0.5.0.post1
- nvcr.io/nvidia/tritonserver:24.04-trtllm-python-py3
- openmmlab/lmdeploy:v0.5.0
- ghcr.io/huggingface/text-generation-inference:2.1

<!-- Please check <a href="artifact://workspace/build/buildkite/vllm/performance-benchmark/.buildkite/nightly-benchmarks/nightly-pipeline.yaml">nightly-pipeline.yaml</a> artifact for more details on how we deploy the docker images. -->

## Hardware

One AWS node with 8x NVIDIA A100 GPUs.

## Workload description

We benchmark vllm, tensorrt-llm, lmdeploy and tgi using the following workload:

- Input length: randomly sample 500 prompts from the ShareGPT dataset (with fixed random seed).
- Output length: the corresponding output length of these 500 prompts.
- Models: llama-3 8B, llama-3 70B, mixtral 8x7B.
- Average QPS (queries per second): 4 for the small model (llama-3 8B) and 2 for the other two models. For each QPS, the arrival time of each query is determined using a random Poisson process (with fixed random seed); see the sketch below.
- Evaluation metrics: throughput (higher is better), TTFT (time to first token, lower is better), ITL (inter-token latency, lower is better).

<!-- Check <a href="artifact://workspace/build/buildkite/vllm/performance-benchmark/.buildkite/nightly-benchmarks/tests/nightly-tests.json">nightly-tests.json</a> artifact for more details. -->
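The Poisson arrival process mentioned above can be summarized with a short sketch. This is an illustration, not the benchmark's actual code: with a target QPS, inter-arrival times are drawn from an exponential distribution with rate equal to the QPS, and a fixed seed keeps the schedule reproducible.

```python
# Illustrative only: Poisson-process arrival times for a target QPS.
import numpy as np

def sample_arrival_times(num_requests: int, qps: float, seed: int = 0):
    rng = np.random.default_rng(seed)                          # fixed seed => reproducible
    inter_arrivals = rng.exponential(1.0 / qps, num_requests)  # Exp(rate = qps)
    return np.cumsum(inter_arrivals)                           # absolute send times in seconds

print(sample_arrival_times(5, qps=4.0))
```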
## Plots

In the following plots, the dot shows the mean and the error bar shows the standard error of the mean. A value of 0 means that the corresponding benchmark crashed.

<img src="artifact://nightly_results.png" alt="Benchmarking results" height=250 >

## Results

{nightly_results_benchmarking_table}
120
.buildkite/nightly-benchmarks/nightly-pipeline.yaml
Normal file
120
.buildkite/nightly-benchmarks/nightly-pipeline.yaml
Normal file
@ -0,0 +1,120 @@
common_pod_spec: &common_pod_spec
  priorityClassName: perf-benchmark
  nodeSelector:
    nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
  volumes:
    - name: devshm
      emptyDir:
        medium: Memory
    - name: hf-cache
      hostPath:
        path: /root/.cache/huggingface
        type: Directory

common_container_settings: &common_container_settings
  command:
    - bash .buildkite/nightly-benchmarks/run-nightly-suite.sh
  resources:
    limits:
      nvidia.com/gpu: 8
  volumeMounts:
    - name: devshm
      mountPath: /dev/shm
    - name: hf-cache
      mountPath: /root/.cache/huggingface
  env:
    - name: VLLM_USAGE_SOURCE
      value: ci-test
    - name: HF_HOME
      value: /root/.cache/huggingface
    - name: VLLM_SOURCE_CODE_LOC
      value: /workspace/build/buildkite/vllm/performance-benchmark
    - name: HF_TOKEN
      valueFrom:
        secretKeyRef:
          name: hf-token-secret
          key: token

steps:
  - block: ":rocket: Ready for comparing vllm against alternatives? This will take 4 hours."

  - label: "A100 trt benchmark"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: nvcr.io/nvidia/tritonserver:24.04-trtllm-python-py3
                <<: *common_container_settings

  - label: "A100 lmdeploy benchmark"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: openmmlab/lmdeploy:v0.5.0
                <<: *common_container_settings

  - label: "A100 vllm benchmark"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: vllm/vllm-openai:latest
                <<: *common_container_settings

  - label: "A100 tgi benchmark"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: ghcr.io/huggingface/text-generation-inference:2.1
                <<: *common_container_settings

  - wait

  - label: "Plot"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: vllm/vllm-openai:v0.5.0.post1
                command:
                  - bash .buildkite/nightly-benchmarks/scripts/nightly-annotate.sh
                resources:
                  limits:
                    nvidia.com/gpu: 8
                volumeMounts:
                  - name: devshm
                    mountPath: /dev/shm
                env:
                  - name: VLLM_USAGE_SOURCE
                    value: ci-test
                  - name: VLLM_SOURCE_CODE_LOC
                    value: /workspace/build/buildkite/vllm/performance-benchmark
                  - name: HF_TOKEN
                    valueFrom:
                      secretKeyRef:
                        name: hf-token-secret
                        key: token

  - wait
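
The pipeline leans on YAML anchors (`&common_pod_spec`) and merge keys (`<<:`) so each benchmark step only overrides the container image. A small sketch of how the merge expands when the YAML is loaded (the keys below are illustrative, not the real pipeline):

```python
import yaml  # PyYAML resolves the '<<' merge key while loading

doc = """
base: &base
  priority: 100
  queue: A100
step:
  <<: *base
  image: vllm/vllm-openai:latest
"""
print(yaml.safe_load(doc)["step"])
# -> {'priority': 100, 'queue': 'A100', 'image': 'vllm/vllm-openai:latest'}
```
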
.buildkite/nightly-benchmarks/run-benchmarks-suite.sh (new file, 376 lines)
@@ -0,0 +1,376 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script should be run inside the CI process
|
||||
# This script assumes that we are already inside the vllm/ directory
|
||||
# Benchmarking results will be available inside vllm/benchmarks/results/
|
||||
|
||||
# Do not set -e, as the mixtral 8x22B model tends to crash occasionally
|
||||
# and we still want to see other benchmarking results even when mixtral crashes.
|
||||
set -o pipefail
|
||||
|
||||
check_gpus() {
|
||||
# check the number of GPUs and GPU type.
|
||||
declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
|
||||
if [[ $gpu_count -gt 0 ]]; then
|
||||
echo "GPU found."
|
||||
else
|
||||
echo "Need at least 1 GPU to run benchmarking."
|
||||
exit 1
|
||||
fi
|
||||
declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}')
|
||||
echo "GPU type is $gpu_type"
|
||||
}
|
||||
|
||||
check_hf_token() {
|
||||
# check if HF_TOKEN is available and valid
|
||||
if [[ -z "$HF_TOKEN" ]]; then
|
||||
echo "Error: HF_TOKEN is not set."
|
||||
exit 1
|
||||
elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
|
||||
echo "Error: HF_TOKEN does not start with 'hf_'."
|
||||
exit 1
|
||||
else
|
||||
echo "HF_TOKEN is set and valid."
|
||||
fi
|
||||
}
|
||||
|
||||
json2args() {
|
||||
# transforms the JSON string to command line args, and '_' is replaced to '-'
|
||||
# example:
|
||||
# input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
|
||||
# output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
|
||||
local json_string=$1
|
||||
local args=$(
|
||||
echo "$json_string" | jq -r '
|
||||
to_entries |
|
||||
map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
|
||||
join(" ")
|
||||
'
|
||||
)
|
||||
echo "$args"
|
||||
}
|
||||
|
||||
wait_for_server() {
|
||||
# wait for vllm server to start
|
||||
# return 1 if vllm server crashes
|
||||
timeout 1200 bash -c '
|
||||
until curl -X POST localhost:8000/v1/completions; do
|
||||
sleep 1
|
||||
done' && return 0 || return 1
|
||||
}
|
||||
|
||||
kill_gpu_processes() {
|
||||
# kill all processes on GPU.
|
||||
pids=$(nvidia-smi --query-compute-apps=pid --format=csv,noheader)
|
||||
if [ -z "$pids" ]; then
|
||||
echo "No GPU processes found."
|
||||
else
|
||||
for pid in $pids; do
|
||||
kill -9 "$pid"
|
||||
echo "Killed process with PID: $pid"
|
||||
done
|
||||
|
||||
echo "All GPU processes have been killed."
|
||||
fi
|
||||
|
||||
# Sometimes kill with pid doesn't work properly, we can also kill all process running python or python3
|
||||
# since we are in container anyway
|
||||
pkill -9 -f python
|
||||
pkill -9 -f python3
|
||||
|
||||
# waiting for GPU processes to be fully killed
|
||||
# loop while nvidia-smi returns any processes
|
||||
while [ -n "$(nvidia-smi --query-compute-apps=pid --format=csv,noheader)" ]; do
|
||||
sleep 1
|
||||
echo "Waiting for GPU processes to be killed"
|
||||
done
|
||||
|
||||
# remove vllm config file
|
||||
rm -rf ~/.config/vllm
|
||||
|
||||
# Print the GPU memory usage
|
||||
# so that we know if all GPU processes are killed.
|
||||
gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0)
|
||||
# The memory usage should be 0 MB.
|
||||
echo "GPU 0 Memory Usage: $gpu_memory_usage MB"
|
||||
}
|
||||
|
||||
upload_to_buildkite() {
|
||||
# upload the benchmarking results to buildkite
|
||||
|
||||
# if the agent binary is not found, skip uploading the results, exit 0
|
||||
# Check if buildkite-agent is available in the PATH or at /workspace/buildkite-agent
|
||||
if command -v buildkite-agent >/dev/null 2>&1; then
|
||||
BUILDKITE_AGENT_COMMAND="buildkite-agent"
|
||||
elif [ -f /workspace/buildkite-agent ]; then
|
||||
BUILDKITE_AGENT_COMMAND="/workspace/buildkite-agent"
|
||||
else
|
||||
echo "buildkite-agent binary not found. Skip uploading the results."
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Use the determined command to annotate and upload artifacts
|
||||
$BUILDKITE_AGENT_COMMAND annotate --style "info" --context "$BUILDKITE_LABEL-benchmark-results" < $RESULTS_FOLDER/benchmark_results.md
|
||||
$BUILDKITE_AGENT_COMMAND artifact upload "$RESULTS_FOLDER/*"
|
||||
}
|
||||
|
||||
run_latency_tests() {
|
||||
# run latency tests using `benchmark_latency.py`
|
||||
# $1: a json file specifying latency test cases
|
||||
|
||||
local latency_test_file
|
||||
latency_test_file=$1
|
||||
|
||||
# Iterate over latency tests
|
||||
jq -c '.[]' "$latency_test_file" | while read -r params; do
|
||||
# get the test name, and append the GPU type back to it.
|
||||
test_name=$(echo "$params" | jq -r '.test_name')
|
||||
if [[ ! "$test_name" =~ ^latency_ ]]; then
|
||||
echo "In latency-test.json, test_name must start with \"latency_\"."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||
echo "Skip test case $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
# get arguments
|
||||
latency_params=$(echo "$params" | jq -r '.parameters')
|
||||
latency_args=$(json2args "$latency_params")
|
||||
|
||||
# check if there is enough GPU to run the test
|
||||
tp=$(echo "$latency_params" | jq -r '.tensor_parallel_size')
|
||||
if [[ $gpu_count -lt $tp ]]; then
|
||||
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $testname."
|
||||
continue
|
||||
fi
|
||||
|
||||
latency_command="python3 benchmark_latency.py \
|
||||
--output-json $RESULTS_FOLDER/${test_name}.json \
|
||||
$latency_args"
|
||||
|
||||
echo "Running test case $test_name"
|
||||
echo "Latency command: $latency_command"
|
||||
|
||||
# record the benchmarking command and the GPU type
|
||||
jq_output=$(jq -n \
|
||||
--arg latency "$latency_command" \
|
||||
--arg gpu "$gpu_type" \
|
||||
'{
|
||||
latency_command: $latency,
|
||||
gpu_type: $gpu
|
||||
}')
|
||||
echo "$jq_output" > "$RESULTS_FOLDER/$test_name.commands"
|
||||
|
||||
# run the benchmark
|
||||
eval "$latency_command"
|
||||
|
||||
kill_gpu_processes
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
run_throughput_tests() {
|
||||
# run throughput tests using `benchmark_throughput.py`
|
||||
# $1: a json file specifying throughput test cases
|
||||
|
||||
local throughput_test_file
|
||||
throughput_test_file=$1
|
||||
|
||||
# Iterate over throughput tests
|
||||
jq -c '.[]' "$throughput_test_file" | while read -r params; do
|
||||
# get the test name, and append the GPU type back to it.
|
||||
test_name=$(echo "$params" | jq -r '.test_name')
|
||||
if [[ ! "$test_name" =~ ^throughput_ ]]; then
|
||||
echo "In throughput-test.json, test_name must start with \"throughput_\"."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||
echo "Skip test case $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
# get arguments
|
||||
throughput_params=$(echo "$params" | jq -r '.parameters')
|
||||
throughput_args=$(json2args "$throughput_params")
|
||||
|
||||
# check if there is enough GPU to run the test
|
||||
tp=$(echo $throughput_params | jq -r '.tensor_parallel_size')
|
||||
if [[ $gpu_count -lt $tp ]]; then
|
||||
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $testname."
|
||||
continue
|
||||
fi
|
||||
|
||||
throughput_command="python3 benchmark_throughput.py \
|
||||
--output-json $RESULTS_FOLDER/${test_name}.json \
|
||||
$throughput_args"
|
||||
|
||||
echo "Running test case $test_name"
|
||||
echo "Throughput command: $throughput_command"
|
||||
# record the benchmarking command and the GPU type
|
||||
jq_output=$(jq -n \
|
||||
--arg command "$throughput_command" \
|
||||
--arg gpu "$gpu_type" \
|
||||
'{
|
||||
throughput_command: $command,
|
||||
gpu_type: $gpu
|
||||
}')
|
||||
echo "$jq_output" > "$RESULTS_FOLDER/$test_name.commands"
|
||||
|
||||
# run the benchmark
|
||||
eval "$throughput_command"
|
||||
|
||||
kill_gpu_processes
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
run_serving_tests() {
|
||||
# run serving tests using `benchmark_serving.py`
|
||||
# $1: a json file specifying serving test cases
|
||||
|
||||
local serving_test_file
|
||||
serving_test_file=$1
|
||||
|
||||
# Iterate over serving tests
|
||||
jq -c '.[]' "$serving_test_file" | while read -r params; do
|
||||
# get the test name, and append the GPU type back to it.
|
||||
test_name=$(echo "$params" | jq -r '.test_name')
|
||||
if [[ ! "$test_name" =~ ^serving_ ]]; then
|
||||
echo "In serving-test.json, test_name must start with \"serving_\"."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||
echo "Skip test case $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
|
||||
# get client and server arguments
|
||||
server_params=$(echo "$params" | jq -r '.server_parameters')
|
||||
client_params=$(echo "$params" | jq -r '.client_parameters')
|
||||
server_args=$(json2args "$server_params")
|
||||
client_args=$(json2args "$client_params")
|
||||
qps_list=$(echo "$params" | jq -r '.qps_list')
|
||||
qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
|
||||
echo "Running over qps list $qps_list"
|
||||
|
||||
# check if there is enough GPU to run the test
|
||||
tp=$(echo "$server_params" | jq -r '.tensor_parallel_size')
|
||||
if [[ $gpu_count -lt $tp ]]; then
|
||||
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $testname."
|
||||
continue
|
||||
fi
|
||||
|
||||
# check if server model and client model is aligned
|
||||
server_model=$(echo "$server_params" | jq -r '.model')
|
||||
client_model=$(echo "$client_params" | jq -r '.model')
|
||||
if [[ $server_model != "$client_model" ]]; then
|
||||
echo "Server model and client model must be the same. Skip testcase $testname."
|
||||
continue
|
||||
fi
|
||||
|
||||
server_command="python3 \
|
||||
-m vllm.entrypoints.openai.api_server \
|
||||
$server_args"
|
||||
|
||||
# run the server
|
||||
echo "Running test case $test_name"
|
||||
echo "Server command: $server_command"
|
||||
eval "$server_command" &
|
||||
server_pid=$!
|
||||
|
||||
# wait until the server is alive
|
||||
wait_for_server
|
||||
if [ $? -eq 0 ]; then
|
||||
echo ""
|
||||
echo "vllm server is up and running."
|
||||
else
|
||||
echo ""
|
||||
echo "vllm failed to start within the timeout period."
|
||||
fi
|
||||
|
||||
# iterate over different QPS
|
||||
for qps in $qps_list; do
|
||||
# remove the surrounding single quote from qps
|
||||
if [[ "$qps" == *"inf"* ]]; then
|
||||
echo "qps was $qps"
|
||||
qps="inf"
|
||||
echo "now qps is $qps"
|
||||
fi
|
||||
|
||||
new_test_name=$test_name"_qps_"$qps
|
||||
|
||||
client_command="python3 benchmark_serving.py \
|
||||
--save-result \
|
||||
--result-dir $RESULTS_FOLDER \
|
||||
--result-filename ${new_test_name}.json \
|
||||
--request-rate $qps \
|
||||
$client_args"
|
||||
|
||||
echo "Running test case $test_name with qps $qps"
|
||||
echo "Client command: $client_command"
|
||||
|
||||
eval "$client_command"
|
||||
|
||||
# record the benchmarking commands
|
||||
jq_output=$(jq -n \
|
||||
--arg server "$server_command" \
|
||||
--arg client "$client_command" \
|
||||
--arg gpu "$gpu_type" \
|
||||
'{
|
||||
server_command: $server,
|
||||
client_command: $client,
|
||||
gpu_type: $gpu
|
||||
}')
|
||||
echo "$jq_output" > "$RESULTS_FOLDER/${new_test_name}.commands"
|
||||
|
||||
done
|
||||
|
||||
# clean up
|
||||
kill -9 $server_pid
|
||||
kill_gpu_processes
|
||||
done
|
||||
}
|
||||
|
||||
main() {
|
||||
check_gpus
|
||||
check_hf_token
|
||||
|
||||
# dependencies
|
||||
(which wget && which curl) || (apt-get update && apt-get install -y wget curl)
|
||||
(which jq) || (apt-get update && apt-get -y install jq)
|
||||
|
||||
# get the current IP address, required by benchmark_serving.py
|
||||
export VLLM_HOST_IP=$(hostname -I | awk '{print $1}')
|
||||
# turn off the reporting of the status of each request, to clean up the terminal output
|
||||
export VLLM_LOG_LEVEL="WARNING"
|
||||
|
||||
# prepare for benchmarking
|
||||
cd benchmarks || exit 1
|
||||
wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
|
||||
declare -g RESULTS_FOLDER=results/
|
||||
mkdir -p $RESULTS_FOLDER
|
||||
QUICK_BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
|
||||
|
||||
# benchmarking
|
||||
run_serving_tests $QUICK_BENCHMARK_ROOT/tests/serving-tests.json
|
||||
run_latency_tests $QUICK_BENCHMARK_ROOT/tests/latency-tests.json
|
||||
run_throughput_tests $QUICK_BENCHMARK_ROOT/tests/throughput-tests.json
|
||||
|
||||
|
||||
# postprocess benchmarking results
|
||||
pip install tabulate pandas
|
||||
python3 $QUICK_BENCHMARK_ROOT/scripts/convert-results-json-to-markdown.py
|
||||
|
||||
upload_to_buildkite
|
||||
}
|
||||
|
||||
main "$@"
|
.buildkite/nightly-benchmarks/run-nightly-suite.sh (new file, 76 lines)
@@ -0,0 +1,76 @@
#!/bin/bash

set -o pipefail
set -x

check_gpus() {
    # check the number of GPUs and GPU type.
    declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
    if [[ $gpu_count -gt 0 ]]; then
        echo "GPU found."
    else
        echo "Need at least 1 GPU to run benchmarking."
        exit 1
    fi
    declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}')
    echo "GPU type is $gpu_type"
}

check_hf_token() {
    # check if HF_TOKEN is available and valid
    if [[ -z "$HF_TOKEN" ]]; then
        echo "Error: HF_TOKEN is not set."
        exit 1
    elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
        echo "Error: HF_TOKEN does not start with 'hf_'."
        exit 1
    else
        echo "HF_TOKEN is set and valid."
    fi
}

main() {

    check_gpus
    check_hf_token

    df -h

    (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
    (which jq) || (apt-get update && apt-get -y install jq)

    cd $VLLM_SOURCE_CODE_LOC/benchmarks
    wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json

    # run lmdeploy
    if which lmdeploy >/dev/null; then
        echo "lmdeploy is available, redirect to run-lmdeploy-nightly.sh"
        bash ../.buildkite/nightly-benchmarks/scripts/run-lmdeploy-nightly.sh
        exit 0
    fi

    # run tgi
    if [ -e /tgi-entrypoint.sh ]; then
        echo "tgi is available, redirect to run-tgi-nightly.sh"
        bash ../.buildkite/nightly-benchmarks/scripts/run-tgi-nightly.sh
        exit 0
    fi

    # run trt
    if which trtllm-build >/dev/null; then
        echo "trtllm is available, redirect to run-trt-nightly.sh"
        bash ../.buildkite/nightly-benchmarks/scripts/run-trt-nightly.sh
        exit 0
    fi

    # run vllm
    if [ -e /vllm-workspace ]; then
        echo "vllm is available, redirect to run-vllm-nightly.sh"
        bash ../.buildkite/nightly-benchmarks/scripts/run-vllm-nightly.sh
        exit 0
    fi

}

main "$@"
@@ -0,0 +1,192 @@
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
|
||||
results_folder = Path("results/")
|
||||
|
||||
# latency results and the keys that will be printed into markdown
|
||||
latency_results = []
|
||||
latency_column_mapping = {
|
||||
"test_name": "Test name",
|
||||
"gpu_type": "GPU",
|
||||
"avg_latency": "Mean latency (ms)",
|
||||
# "P10": "P10 (s)",
|
||||
# "P25": "P25 (s)",
|
||||
"P50": "Median latency (ms)",
|
||||
# "P75": "P75 (s)",
|
||||
# "P90": "P90 (s)",
|
||||
"P99": "P99 latency (ms)",
|
||||
}
|
||||
|
||||
# throughput tests and the keys that will be printed into markdown
|
||||
throughput_results = []
|
||||
throughput_results_column_mapping = {
|
||||
"test_name": "Test name",
|
||||
"gpu_type": "GPU",
|
||||
# "num_requests": "# of req.",
|
||||
# "total_num_tokens": "Total # of tokens",
|
||||
# "elapsed_time": "Elapsed time (s)",
|
||||
"requests_per_second": "Tput (req/s)",
|
||||
# "tokens_per_second": "Tput (tok/s)",
|
||||
}
|
||||
|
||||
# serving results and the keys that will be printed into markdown
|
||||
serving_results = []
|
||||
serving_column_mapping = {
|
||||
"test_name": "Test name",
|
||||
"gpu_type": "GPU",
|
||||
# "completed": "# of req.",
|
||||
"request_throughput": "Tput (req/s)",
|
||||
# "input_throughput": "Input Tput (tok/s)",
|
||||
# "output_throughput": "Output Tput (tok/s)",
|
||||
"mean_ttft_ms": "Mean TTFT (ms)",
|
||||
"median_ttft_ms": "Median TTFT (ms)",
|
||||
"p99_ttft_ms": "P99 TTFT (ms)",
|
||||
# "mean_tpot_ms": "Mean TPOT (ms)",
|
||||
# "median_tpot_ms": "Median",
|
||||
# "p99_tpot_ms": "P99",
|
||||
"mean_itl_ms": "Mean ITL (ms)",
|
||||
"median_itl_ms": "Median ITL (ms)",
|
||||
"p99_itl_ms": "P99 ITL (ms)",
|
||||
}
|
||||
|
||||
|
||||
def read_markdown(file):
|
||||
if os.path.exists(file):
|
||||
with open(file, "r") as f:
|
||||
return f.read() + "\n"
|
||||
else:
|
||||
return f"{file} not found.\n"
|
||||
|
||||
|
||||
def results_to_json(latency, throughput, serving):
|
||||
return json.dumps({
|
||||
'latency': latency.to_dict(),
|
||||
'throughput': throughput.to_dict(),
|
||||
'serving': serving.to_dict()
|
||||
})
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
# collect results
|
||||
for test_file in results_folder.glob("*.json"):
|
||||
|
||||
with open(test_file, "r") as f:
|
||||
raw_result = json.loads(f.read())
|
||||
|
||||
if "serving" in str(test_file):
|
||||
# this result is generated via `benchmark_serving.py`
|
||||
|
||||
# attach the benchmarking command to raw_result
|
||||
with open(test_file.with_suffix(".commands"), "r") as f:
|
||||
command = json.loads(f.read())
|
||||
raw_result.update(command)
|
||||
|
||||
# update the test name of this result
|
||||
raw_result.update({"test_name": test_file.stem})
|
||||
|
||||
# add the result to serving_results
|
||||
serving_results.append(raw_result)
|
||||
continue
|
||||
|
||||
elif "latency" in f.name:
|
||||
# this result is generated via `benchmark_latency.py`
|
||||
|
||||
# attach the benchmarking command to raw_result
|
||||
with open(test_file.with_suffix(".commands"), "r") as f:
|
||||
command = json.loads(f.read())
|
||||
raw_result.update(command)
|
||||
|
||||
# update the test name of this result
|
||||
raw_result.update({"test_name": test_file.stem})
|
||||
|
||||
# get different percentiles
|
||||
for perc in [10, 25, 50, 75, 90, 99]:
|
||||
# Multiply 1000 to convert the time unit from s to ms
|
||||
raw_result.update(
|
||||
{f"P{perc}": 1000 * raw_result["percentiles"][str(perc)]})
|
||||
raw_result["avg_latency"] = raw_result["avg_latency"] * 1000
|
||||
|
||||
# add the result to latency_results
|
||||
latency_results.append(raw_result)
|
||||
continue
|
||||
|
||||
elif "throughput" in f.name:
|
||||
# this result is generated via `benchmark_throughput.py`
|
||||
|
||||
# attach the benchmarking command to raw_result
|
||||
with open(test_file.with_suffix(".commands"), "r") as f:
|
||||
command = json.loads(f.read())
|
||||
raw_result.update(command)
|
||||
|
||||
# update the test name of this result
|
||||
raw_result.update({"test_name": test_file.stem})
|
||||
|
||||
# add the result to throughput_results
|
||||
throughput_results.append(raw_result)
|
||||
continue
|
||||
|
||||
print(f"Skipping {test_file}")
|
||||
|
||||
latency_results = pd.DataFrame.from_dict(latency_results)
|
||||
serving_results = pd.DataFrame.from_dict(serving_results)
|
||||
throughput_results = pd.DataFrame.from_dict(throughput_results)
|
||||
|
||||
raw_results_json = results_to_json(latency_results, throughput_results,
|
||||
serving_results)
|
||||
|
||||
# remapping the key, for visualization purpose
|
||||
if not latency_results.empty:
|
||||
latency_results = latency_results[list(
|
||||
latency_column_mapping.keys())].rename(
|
||||
columns=latency_column_mapping)
|
||||
if not serving_results.empty:
|
||||
serving_results = serving_results[list(
|
||||
serving_column_mapping.keys())].rename(
|
||||
columns=serving_column_mapping)
|
||||
if not throughput_results.empty:
|
||||
throughput_results = throughput_results[list(
|
||||
throughput_results_column_mapping.keys())].rename(
|
||||
columns=throughput_results_column_mapping)
|
||||
|
||||
processed_results_json = results_to_json(latency_results,
|
||||
throughput_results,
|
||||
serving_results)
|
||||
|
||||
# get markdown tables
|
||||
latency_md_table = tabulate(latency_results,
|
||||
headers='keys',
|
||||
tablefmt='pipe',
|
||||
showindex=False)
|
||||
serving_md_table = tabulate(serving_results,
|
||||
headers='keys',
|
||||
tablefmt='pipe',
|
||||
showindex=False)
|
||||
throughput_md_table = tabulate(throughput_results,
|
||||
headers='keys',
|
||||
tablefmt='pipe',
|
||||
showindex=False)
|
||||
|
||||
# document the result
|
||||
with open(results_folder / "benchmark_results.md", "w") as f:
|
||||
|
||||
results = read_markdown(
|
||||
"../.buildkite/nightly-benchmarks/tests/descriptions.md")
|
||||
results = results.format(
|
||||
latency_tests_markdown_table=latency_md_table,
|
||||
throughput_tests_markdown_table=throughput_md_table,
|
||||
serving_tests_markdown_table=serving_md_table,
|
||||
benchmarking_results_in_json_string=processed_results_json)
|
||||
f.write(results)
|
||||
|
||||
# document benchmarking results in json
|
||||
with open(results_folder / "benchmark_results.json", "w") as f:
|
||||
|
||||
results = latency_results.to_dict(
|
||||
orient='records') + throughput_results.to_dict(
|
||||
orient='records') + serving_results.to_dict(orient='records')
|
||||
f.write(json.dumps(results))
|
.buildkite/nightly-benchmarks/scripts/download-tokenizer.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import argparse

from transformers import AutoTokenizer


def main(model, cachedir):
    # Load the tokenizer and save it to the specified directory
    tokenizer = AutoTokenizer.from_pretrained(model)
    tokenizer.save_pretrained(cachedir)
    print(f"Tokenizer saved to {cachedir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Download and save Hugging Face tokenizer")
    parser.add_argument("--model",
                        type=str,
                        required=True,
                        help="Name of the model")
    parser.add_argument("--cachedir",
                        type=str,
                        required=True,
                        help="Directory to save the tokenizer")

    args = parser.parse_args()
    main(args.model, args.cachedir)
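
A quick check (paths are hypothetical) that a tokenizer saved this way can be loaded back from the cache directory the benchmark clients point at with `--tokenizer`:

```python
from transformers import AutoTokenizer

# assumes download-tokenizer.py was run with --cachedir /tokenizer_cache
tokenizer = AutoTokenizer.from_pretrained("/tokenizer_cache")
print(tokenizer("Hello, world!")["input_ids"])
```
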
@@ -0,0 +1,6 @@
from lmdeploy.serve.openai.api_client import APIClient

api_client = APIClient("http://localhost:8000")
model_name = api_client.available_models[0]

print(model_name)
.buildkite/nightly-benchmarks/scripts/launch-trt-server.sh (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
server_params=$1
|
||||
common_params=$2
|
||||
|
||||
|
||||
|
||||
model_path=$(echo "$common_params" | jq -r '.model')
|
||||
model_name="${model_path#*/}"
|
||||
model_type=$(echo "$server_params" | jq -r '.model_type')
|
||||
model_dtype=$(echo "$server_params" | jq -r '.model_dtype')
|
||||
model_tp_size=$(echo "$common_params" | jq -r '.tp')
|
||||
max_batch_size=$(echo "$server_params" | jq -r '.max_batch_size')
|
||||
max_input_len=$(echo "$server_params" | jq -r '.max_input_len')
|
||||
max_output_len=$(echo "$server_params" | jq -r '.max_output_len')
|
||||
trt_llm_version=$(echo "$server_params" | jq -r '.trt_llm_version')
|
||||
|
||||
cd ~
|
||||
rm -rf models
|
||||
mkdir -p models
|
||||
cd models
|
||||
models_dir=$(pwd)
|
||||
trt_model_path=${models_dir}/${model_name}-trt-ckpt
|
||||
trt_engine_path=${models_dir}/${model_name}-trt-engine
|
||||
|
||||
cd ~
|
||||
rm -rf tensorrt-demo
|
||||
git clone https://github.com/neuralmagic/tensorrt-demo.git
|
||||
cd tensorrt-demo
|
||||
tensorrt_demo_dir=$(pwd)
|
||||
|
||||
# make sure the parameter inside tensorrt_demo is consistent to envvar
|
||||
sed -i.bak "/key: \"tokenizer_dir\"/,/string_value:/s|string_value: \".*\"|string_value: \"$model_path\"|" ./triton_model_repo/postprocessing/config.pbtxt
|
||||
sed -i.bak "/key: \"tokenizer_dir\"/,/string_value:/s|string_value: \".*\"|string_value: \"$model_path\"|" ./triton_model_repo/preprocessing/config.pbtxt
|
||||
sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/ensemble/config.pbtxt
|
||||
sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/preprocessing/config.pbtxt
|
||||
sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/postprocessing/config.pbtxt
|
||||
sed -i.bak "s|\(max_batch_size:\s*\)[0-9]*|\1$max_batch_size|g" ./triton_model_repo/tensorrt_llm_bls/config.pbtxt
|
||||
|
||||
|
||||
cd /
|
||||
rm -rf tensorrtllm_backend
|
||||
git clone https://github.com/triton-inference-server/tensorrtllm_backend.git
|
||||
git lfs install
|
||||
cd tensorrtllm_backend
|
||||
git checkout $trt_llm_version
|
||||
tensorrtllm_backend_dir=$(pwd)
|
||||
git submodule update --init --recursive
|
||||
cp -r ${tensorrt_demo_dir}/triton_model_repo ${tensorrtllm_backend_dir}/
|
||||
|
||||
cd /tensorrtllm_backend
|
||||
cd ./tensorrt_llm/examples/${model_type}
|
||||
|
||||
|
||||
if echo "$common_params" | jq -e 'has("fp8")' > /dev/null; then
|
||||
|
||||
echo "Key 'fp8' exists in common params. Use quantize.py instead of convert_checkpoint.py"
|
||||
echo "Reference: https://github.com/NVIDIA/TensorRT-LLM/blob/main/examples/llama/README.md"
|
||||
python ../quantization/quantize.py \
|
||||
--model_dir ${model_path} \
|
||||
--dtype ${model_dtype} \
|
||||
--tp_size ${model_tp_size} \
|
||||
--output_dir ${trt_model_path} \
|
||||
--qformat fp8 \
|
||||
--kv_cache_dtype fp8 \
|
||||
--calib_size 2
|
||||
|
||||
else
|
||||
|
||||
echo "Key 'fp8' does not exist in common params. Use convert_checkpoint.py"
|
||||
python3 convert_checkpoint.py \
|
||||
--model_dir ${model_path} \
|
||||
--dtype ${model_dtype} \
|
||||
--tp_size ${model_tp_size} \
|
||||
--output_dir ${trt_model_path}
|
||||
|
||||
fi
|
||||
|
||||
|
||||
|
||||
trtllm-build \
|
||||
--checkpoint_dir=${trt_model_path} \
|
||||
--gpt_attention_plugin=${model_dtype} \
|
||||
--gemm_plugin=${model_dtype} \
|
||||
--remove_input_padding=enable \
|
||||
--paged_kv_cache=enable \
|
||||
--tp_size=${model_tp_size} \
|
||||
--max_batch_size=${max_batch_size} \
|
||||
--max_input_len=${max_input_len} \
|
||||
--max_output_len=${max_output_len} \
|
||||
--max_num_tokens=${max_output_len} \
|
||||
--opt_num_tokens=${max_output_len} \
|
||||
--output_dir=${trt_engine_path}
|
||||
|
||||
cd /tensorrtllm_backend/triton_model_repo
|
||||
rm -rf ./tensorrt_llm/1/*
|
||||
cp -r ${trt_engine_path}/* ./tensorrt_llm/1
|
||||
cd /tensorrtllm_backend
|
||||
python3 scripts/launch_triton_server.py \
|
||||
--world_size=${model_tp_size} \
|
||||
--model_repo=/tensorrtllm_backend/triton_model_repo &
|
.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh (new file, 40 lines)
@@ -0,0 +1,40 @@
#!/bin/bash

set -ex
set -o pipefail


main() {

    (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
    (which jq) || (apt-get update && apt-get -y install jq)

    if [ ! -f /workspace/buildkite-agent ]; then
        echo "buildkite-agent binary not found. Skip plotting the results."
        exit 0
    fi

    # initial annotation
    description="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-descriptions.md"

    # download results
    cd $VLLM_SOURCE_CODE_LOC/benchmarks
    mkdir -p results/
    /workspace/buildkite-agent artifact download 'results/*nightly_results.json' results/
    ls
    ls results/

    # generate figures
    python3 -m pip install tabulate pandas matplotlib
    python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
        --description $description \
        --results-folder results/

    # upload results and figures
    /workspace/buildkite-agent artifact upload "nightly_results.png"
    /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-pipeline.yaml
    /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/tests/nightly-tests.json
    /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly_results.md
}

main "$@"
.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
import argparse
|
||||
import json
|
||||
import math
|
||||
from pathlib import Path
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
|
||||
|
||||
def parse_arguments():
|
||||
parser = argparse.ArgumentParser(
|
||||
description=
|
||||
'Parse command line arguments for summary-nightly-results script.')
|
||||
parser.add_argument('--results-folder',
|
||||
type=str,
|
||||
required=True,
|
||||
help='The folder where the results are stored.')
|
||||
parser.add_argument('--description',
|
||||
type=str,
|
||||
required=True,
|
||||
help='Description of the results.')
|
||||
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def main(args):
|
||||
bar_colors = ['#56B4E9', '#009E73', '#D55E00', '#E69F00']
|
||||
results_folder = Path(args.results_folder)
|
||||
|
||||
results = []
|
||||
|
||||
# collect results
|
||||
for test_file in results_folder.glob("*_nightly_results.json"):
|
||||
with open(test_file, "r") as f:
|
||||
results = results + json.loads(f.read())
|
||||
|
||||
# generate markdown table
|
||||
df = pd.DataFrame.from_dict(results)
|
||||
|
||||
md_table = tabulate(df, headers='keys', tablefmt='pipe', showindex=False)
|
||||
|
||||
with open(args.description, "r") as f:
|
||||
description = f.read()
|
||||
|
||||
description = description.format(
|
||||
nightly_results_benchmarking_table=md_table)
|
||||
|
||||
with open("nightly_results.md", "w") as f:
|
||||
f.write(description)
|
||||
|
||||
plt.rcParams.update({'font.size': 20})
|
||||
|
||||
# plot results
|
||||
fig, axes = plt.subplots(3, 3, figsize=(16, 14))
|
||||
fig.subplots_adjust(hspace=1)
|
||||
methods = ["vllm", "trt", "lmdeploy", "tgi"]
|
||||
for i, model in enumerate(["llama8B", "llama70B", "mixtral8x7B"]):
|
||||
for j, metric in enumerate(["TTFT", "ITL"]):
|
||||
means, stds = [], []
|
||||
for method in methods:
|
||||
target = df['Test name'].str.contains(model)
|
||||
target = target & df['Engine'].str.contains(method)
|
||||
filtered_df = df[target]
|
||||
|
||||
if filtered_df.empty:
|
||||
means.append(0.)
|
||||
stds.append(0.)
|
||||
else:
|
||||
means.append(filtered_df[f"Mean {metric} (ms)"].values[0])
|
||||
std = filtered_df[f"Std {metric} (ms)"].values[0]
|
||||
success = filtered_df["Successful req."].values[0]
|
||||
stds.append(std / math.sqrt(success))
|
||||
|
||||
print(model, metric)
|
||||
print(means, stds)
|
||||
|
||||
ax = axes[i, j + 1]
|
||||
|
||||
bars = ax.bar(
|
||||
["vllm", "trt", "lmdeploy", "tgi"],
|
||||
means,
|
||||
yerr=stds,
|
||||
capsize=10,
|
||||
)
|
||||
for idx, bar in enumerate(bars):
|
||||
bar.set_color(bar_colors[idx])
|
||||
ax.set_ylim(bottom=0)
|
||||
|
||||
ax.set_ylabel(f"{metric} (ms)")
|
||||
ax.set_title(f"{model} {metric}")
|
||||
ax.grid(axis='y')
|
||||
|
||||
metric = "Tput"
|
||||
j = 0
|
||||
if True:
|
||||
tputs = []
|
||||
for method in methods:
|
||||
target = df['Test name'].str.contains(model)
|
||||
target = target & df['Engine'].str.contains(method)
|
||||
filtered_df = df[target]
|
||||
|
||||
if filtered_df.empty:
|
||||
tputs.append(0.)
|
||||
else:
|
||||
input_tput = filtered_df["Input Tput (tok/s)"].values[0]
|
||||
output_tput = filtered_df["Output Tput (tok/s)"].values[0]
|
||||
tputs.append(input_tput + output_tput)
|
||||
|
||||
print(model, metric)
|
||||
print(tputs)
|
||||
|
||||
ax = axes[i, j]
|
||||
|
||||
bars = ax.bar(
|
||||
["vllm", "trt", "lmdeploy", "tgi"],
|
||||
tputs,
|
||||
)
|
||||
for idx, bar in enumerate(bars):
|
||||
bar.set_color(bar_colors[idx])
|
||||
|
||||
ax.set_ylim(bottom=0)
|
||||
|
||||
ax.set_ylabel("Tput (token/s)")
|
||||
ax.set_title(f"{model} {metric}")
|
||||
ax.grid(axis='y')
|
||||
|
||||
fig.tight_layout()
|
||||
fig.savefig("nightly_results.png", bbox_inches='tight', dpi=400)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parse_arguments()
|
||||
main(args)
|
.buildkite/nightly-benchmarks/scripts/run-lmdeploy-nightly.sh (new file, 218 lines)
@@ -0,0 +1,218 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o pipefail
|
||||
|
||||
check_gpus() {
|
||||
# check the number of GPUs and GPU type.
|
||||
declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
|
||||
if [[ $gpu_count -gt 0 ]]; then
|
||||
echo "GPU found."
|
||||
else
|
||||
echo "Need at least 1 GPU to run benchmarking."
|
||||
exit 1
|
||||
fi
|
||||
declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}')
|
||||
echo "GPU type is $gpu_type"
|
||||
}
|
||||
|
||||
kill_gpu_processes() {
|
||||
pkill lmdeploy || true
|
||||
# waiting for GPU processes to be fully killed
|
||||
sleep 10
|
||||
# Print the GPU memory usage
|
||||
# so that we know if all GPU processes are killed.
|
||||
gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0)
|
||||
# The memory usage should be 0 MB.
|
||||
echo "GPU 0 Memory Usage: $gpu_memory_usage MB"
|
||||
}
|
||||
|
||||
json2args() {
|
||||
# transforms the JSON string to command line args, and '_' is replaced to '-'
|
||||
# example:
|
||||
# input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
|
||||
# output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
|
||||
local json_string=$1
|
||||
local args=$(
|
||||
echo "$json_string" | jq -r '
|
||||
to_entries |
|
||||
map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
|
||||
join(" ")
|
||||
'
|
||||
)
|
||||
echo "$args"
|
||||
}
|
||||
|
||||
wait_for_server() {
|
||||
# wait for the lmdeploy server to start
|
||||
# return 1 if the lmdeploy server crashes
|
||||
timeout 1200 bash -c '
|
||||
until curl -s localhost:8000/v1/completions > /dev/null; do
|
||||
sleep 1
|
||||
done' && return 0 || return 1
|
||||
}
|
||||
|
||||
run_serving_tests() {
|
||||
# run serving tests using `benchmark_serving.py`
|
||||
# $1: a json file specifying serving test cases
|
||||
|
||||
local serving_test_file
|
||||
serving_test_file=$1
|
||||
|
||||
# Iterate over serving tests
|
||||
jq -c '.[]' "$serving_test_file" | while read -r params; do
|
||||
# get the test name, and append the GPU type back to it.
|
||||
test_name=$(echo "$params" | jq -r '.test_name')
|
||||
|
||||
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||
echo "Skip test case $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
# append lmdeploy to the test name
|
||||
test_name=lmdeploy_$test_name
|
||||
|
||||
# get common parameters
|
||||
common_params=$(echo "$params" | jq -r '.common_parameters')
|
||||
model=$(echo "$common_params" | jq -r '.model')
|
||||
tp=$(echo "$common_params" | jq -r '.tp')
|
||||
dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
|
||||
dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
|
||||
port=$(echo "$common_params" | jq -r '.port')
|
||||
num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
|
||||
|
||||
|
||||
|
||||
# get client and server arguments
|
||||
server_params=$(echo "$params" | jq -r '.lmdeploy_server_parameters')
|
||||
client_params=$(echo "$params" | jq -r '.lmdeploy_client_parameters')
|
||||
server_args=$(json2args "$server_params")
|
||||
client_args=$(json2args "$client_params")
|
||||
qps_list=$(echo "$params" | jq -r '.qps_list')
|
||||
qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
|
||||
echo "Running over qps list $qps_list"
|
||||
|
||||
# check if there is enough GPU to run the test
|
||||
if [[ $gpu_count -lt $tp ]]; then
|
||||
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
# prepare tokenizer
|
||||
rm -rf /tokenizer_cache
|
||||
mkdir /tokenizer_cache
|
||||
python ../.buildkite/nightly-benchmarks/scripts/download-tokenizer.py \
|
||||
--model "$model" \
|
||||
--cachedir /tokenizer_cache
|
||||
|
||||
server_command="lmdeploy serve api_server $model \
|
||||
--tp $tp \
|
||||
--server-port $port \
|
||||
$server_args"
|
||||
|
||||
# run the server
|
||||
echo "Running test case $test_name"
|
||||
echo "Server command: $server_command"
|
||||
bash -c "$server_command" &
|
||||
|
||||
# wait until the server is alive
|
||||
wait_for_server
|
||||
if [ $? -eq 0 ]; then
|
||||
echo ""
|
||||
echo "lmdeploy server is up and running."
|
||||
else
|
||||
echo ""
|
||||
echo "lmdeploy failed to start within the timeout period."
|
||||
break
|
||||
fi
|
||||
|
||||
# get model name
|
||||
model_name=$(python ../.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py)
|
||||
|
||||
# iterate over different QPS
|
||||
for qps in $qps_list; do
|
||||
# remove the surrounding single quote from qps
|
||||
if [[ "$qps" == *"inf"* ]]; then
|
||||
echo "qps was $qps"
|
||||
qps="inf"
|
||||
echo "now qps is $qps"
|
||||
fi
|
||||
|
||||
new_test_name=$test_name"_qps_"$qps
|
||||
|
||||
client_command="python3 benchmark_serving.py \
|
||||
--backend lmdeploy \
|
||||
--tokenizer /tokenizer_cache \
|
||||
--dataset-name $dataset_name \
|
||||
--dataset-path $dataset_path \
|
||||
--num-prompts $num_prompts \
|
||||
--port $port \
|
||||
--save-result \
|
||||
--result-dir $RESULTS_FOLDER \
|
||||
--result-filename ${new_test_name}.json \
|
||||
--request-rate $qps \
|
||||
--model \"$model_name\" \
|
||||
$client_args"
|
||||
|
||||
echo "Running test case $test_name with qps $qps"
|
||||
echo "Client command: $client_command"
|
||||
|
||||
eval "$client_command"
|
||||
|
||||
# record the benchmarking commands
|
||||
jq_output=$(jq -n \
|
||||
--arg server "$server_command" \
|
||||
--arg client "$client_command" \
|
||||
--arg gpu "$gpu_type" \
|
||||
--arg engine "lmdeploy" \
|
||||
'{
|
||||
server_command: $server,
|
||||
client_command: $client,
|
||||
gpu_type: $gpu,
|
||||
engine: $engine
|
||||
}')
|
||||
echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"
|
||||
|
||||
done
|
||||
|
||||
# clean up
|
||||
kill_gpu_processes
|
||||
rm -rf /root/.cache/huggingface/*
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
upload_to_buildkite() {
|
||||
# upload the benchmarking results to buildkite
|
||||
|
||||
# if the agent binary is not found, skip uploading the results, exit 0
|
||||
if [ ! -f /workspace/buildkite-agent ]; then
|
||||
echo "buildkite-agent binary not found. Skip uploading the results."
|
||||
return 0
|
||||
fi
|
||||
# /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md
|
||||
/workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*"
|
||||
}
|
||||
|
||||
|
||||
main() {
|
||||
|
||||
check_gpus
|
||||
# enter vllm directory
|
||||
cd $VLLM_SOURCE_CODE_LOC/benchmarks
|
||||
|
||||
declare -g RESULTS_FOLDER=results/
|
||||
mkdir -p $RESULTS_FOLDER
|
||||
BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
|
||||
|
||||
python -m pip install transformers==4.41.2
|
||||
|
||||
export CURRENT_LLM_SERVING_ENGINE=lmdeploy
|
||||
run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json
|
||||
python -m pip install tabulate pandas
|
||||
python $BENCHMARK_ROOT/scripts/summary-nightly-results.py
|
||||
upload_to_buildkite
|
||||
|
||||
}
|
||||
|
||||
main "$@"
|
.buildkite/nightly-benchmarks/scripts/run-tgi-nightly.sh (new file, 216 lines)
@@ -0,0 +1,216 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o pipefail
|
||||
|
||||
check_gpus() {
|
||||
# check the number of GPUs and GPU type.
|
||||
declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
|
||||
if [[ $gpu_count -gt 0 ]]; then
|
||||
echo "GPU found."
|
||||
else
|
||||
echo "Need at least 1 GPU to run benchmarking."
|
||||
exit 1
|
||||
fi
|
||||
declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}')
|
||||
echo "GPU type is $gpu_type"
|
||||
}
|
||||
|
||||
kill_gpu_processes() {
|
||||
pkill text-generation || true
|
||||
# waiting for GPU processes to be fully killed
|
||||
sleep 10
|
||||
# Print the GPU memory usage
|
||||
# so that we know if all GPU processes are killed.
|
||||
gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0)
|
||||
# The memory usage should be 0 MB.
|
||||
echo "GPU 0 Memory Usage: $gpu_memory_usage MB"
|
||||
}
|
||||
|
||||
json2args() {
|
||||
# transforms the JSON string to command line args, and '_' is replaced to '-'
|
||||
# example:
|
||||
# input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
|
||||
# output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
|
||||
local json_string=$1
|
||||
local args=$(
|
||||
echo "$json_string" | jq -r '
|
||||
to_entries |
|
||||
map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
|
||||
join(" ")
|
||||
'
|
||||
)
|
||||
echo "$args"
|
||||
}
|
||||
|
||||
wait_for_server() {
|
||||
timeout 1200 bash -c '
|
||||
until curl -s localhost:8000/generate_stream > /dev/null; do
|
||||
sleep 1
|
||||
done' && return 0 || return 1
|
||||
}
|
||||
|
||||
run_serving_tests() {
|
||||
# run serving tests using `benchmark_serving.py`
|
||||
# $1: a json file specifying serving test cases
|
||||
|
||||
local serving_test_file
|
||||
serving_test_file=$1
|
||||
|
||||
# Iterate over serving tests
|
||||
jq -c '.[]' "$serving_test_file" | while read -r params; do
|
||||
# get the test name, and append the GPU type back to it.
|
||||
test_name=$(echo "$params" | jq -r '.test_name')
|
||||
|
||||
|
||||
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||
echo "Skip test case $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
# append tgi to the test name
|
||||
test_name=tgi_$test_name
|
||||
|
||||
# get common parameters
|
||||
common_params=$(echo "$params" | jq -r '.common_parameters')
|
||||
model=$(echo "$common_params" | jq -r '.model')
|
||||
tp=$(echo "$common_params" | jq -r '.tp')
|
||||
dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
|
||||
dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
|
||||
port=$(echo "$common_params" | jq -r '.port')
|
||||
num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
|
||||
|
||||
# get client and server arguments
|
||||
server_params=$(echo "$params" | jq -r '.tgi_server_parameters')
|
||||
client_params=$(echo "$params" | jq -r '.tgi_client_parameters')
|
||||
server_args=$(json2args "$server_params")
|
||||
client_args=$(json2args "$client_params")
|
||||
qps_list=$(echo "$params" | jq -r '.qps_list')
|
||||
qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
|
||||
echo "Running over qps list $qps_list"
|
||||
|
||||
# check if there is enough GPU to run the test
|
||||
if [[ $gpu_count -lt $tp ]]; then
|
||||
echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
if echo "$common_params" | jq -e 'has("fp8")' > /dev/null; then
|
||||
echo "Key 'fp8' exists in common params."
|
||||
server_command="/tgi-entrypoint.sh \
|
||||
--model-id $model \
|
||||
--num-shard $tp \
|
||||
--port $port \
|
||||
--quantize fp8 \
|
||||
$server_args"
|
||||
else
|
||||
echo "Key 'fp8' does not exist in common params."
|
||||
server_command="/tgi-entrypoint.sh \
|
||||
--model-id $model \
|
||||
--num-shard $tp \
|
||||
--port $port \
|
||||
$server_args"
|
||||
fi
|
||||
|
||||
|
||||
|
||||
|
||||
# run the server
|
||||
echo "Running test case $test_name"
|
||||
echo "Server command: $server_command"
|
||||
eval "$server_command" &
|
||||
|
||||
# wait until the server is alive
|
||||
wait_for_server
|
||||
if [ $? -eq 0 ]; then
|
||||
echo ""
|
||||
echo "tgi server is up and running."
|
||||
else
|
||||
echo ""
|
||||
echo "tgi failed to start within the timeout period."
|
||||
break
|
||||
fi
|
||||
|
||||
# iterate over different QPS
|
||||
for qps in $qps_list; do
|
||||
# remove the surrounding single quote from qps
|
||||
if [[ "$qps" == *"inf"* ]]; then
|
||||
echo "qps was $qps"
|
||||
qps="inf"
|
||||
echo "now qps is $qps"
|
||||
fi
|
||||
|
||||
new_test_name=$test_name"_qps_"$qps
|
||||
|
||||
client_command="python3 benchmark_serving.py \
|
||||
--backend tgi \
|
||||
--model $model \
|
||||
--dataset-name $dataset_name \
|
||||
--dataset-path $dataset_path \
|
||||
--num-prompts $num_prompts \
|
||||
--port $port \
|
||||
--save-result \
|
||||
--result-dir $RESULTS_FOLDER \
|
||||
--result-filename ${new_test_name}.json \
|
||||
--request-rate $qps \
|
||||
$client_args"
|
||||
|
||||
echo "Running test case $test_name with qps $qps"
|
||||
echo "Client command: $client_command"
|
||||
|
||||
eval "$client_command"
|
||||
|
||||
# record the benchmarking commands
|
||||
jq_output=$(jq -n \
|
||||
--arg server "$server_command" \
|
||||
--arg client "$client_command" \
|
||||
--arg gpu "$gpu_type" \
|
||||
--arg engine "tgi" \
|
||||
'{
|
||||
server_command: $server,
|
||||
client_command: $client,
|
||||
gpu_type: $gpu,
|
||||
engine: $engine
|
||||
}')
|
||||
echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"
|
||||
|
||||
done
|
||||
|
||||
# clean up
|
||||
kill_gpu_processes
|
||||
rm -rf /root/.cache/huggingface/*
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
|
||||
upload_to_buildkite() {
|
||||
# upload the benchmarking results to buildkite
|
||||
|
||||
# if the agent binary is not found, skip uploading the results, exit 0
|
||||
if [ ! -f /workspace/buildkite-agent ]; then
|
||||
echo "buildkite-agent binary not found. Skip uploading the results."
|
||||
return 0
|
||||
fi
|
||||
# /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md
|
||||
/workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*"
|
||||
}
|
||||
|
||||
main() {
|
||||
|
||||
check_gpus
|
||||
# enter vllm directory
|
||||
cd $VLLM_SOURCE_CODE_LOC/benchmarks
|
||||
declare -g RESULTS_FOLDER=results/
|
||||
mkdir -p $RESULTS_FOLDER
|
||||
BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
|
||||
|
||||
export CURRENT_LLM_SERVING_ENGINE=tgi
|
||||
run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json
|
||||
python -m pip install tabulate pandas
|
||||
python $BENCHMARK_ROOT/scripts/summary-nightly-results.py
|
||||
upload_to_buildkite
|
||||
|
||||
}
|
||||
|
||||
main "$@"
|
.buildkite/nightly-benchmarks/scripts/run-trt-nightly.sh (new file, 214 lines)
@@ -0,0 +1,214 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o pipefail
|
||||
|
||||
check_gpus() {
|
||||
# check the number of GPUs and GPU type.
|
||||
declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
|
||||
if [[ $gpu_count -gt 0 ]]; then
|
||||
echo "GPU found."
|
||||
else
|
||||
echo "Need at least 1 GPU to run benchmarking."
|
||||
exit 1
|
||||
fi
|
||||
declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}')
|
||||
echo "GPU type is $gpu_type"
|
||||
}
|
||||
|
||||
kill_gpu_processes() {
|
||||
pkill tritonserver || true
|
||||
# waiting for GPU processes to be fully killed
|
||||
sleep 20
|
||||
# Print the GPU memory usage
|
||||
# so that we know if all GPU processes are killed.
|
||||
gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0)
|
||||
# The memory usage should be 0 MB.
|
||||
echo "GPU 0 Memory Usage: $gpu_memory_usage MB"
|
||||
}
|
||||
|
||||
json2args() {
|
||||
# transforms the JSON string to command line args, and '_' is replaced to '-'
|
||||
# example:
|
||||
# input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
|
||||
# output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
|
||||
local json_string=$1
|
||||
local args=$(
|
||||
echo "$json_string" | jq -r '
|
||||
to_entries |
|
||||
map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
|
||||
join(" ")
|
||||
'
|
||||
)
|
||||
echo "$args"
|
||||
}
|
||||
|
||||
wait_for_server() {
|
||||
timeout 1200 bash -c '
|
||||
until curl -s localhost:8000/generate_stream > /dev/null; do
|
||||
sleep 1
|
||||
done' && return 0 || return 1
|
||||
}
|
||||
|
||||
run_serving_tests() {
|
||||
# run serving tests using `benchmark_serving.py`
|
||||
# $1: a json file specifying serving test cases
|
||||
|
||||
local serving_test_file
|
||||
serving_test_file=$1
|
||||
|
||||
# Iterate over serving tests
|
||||
jq -c '.[]' "$serving_test_file" | while read -r params; do
|
||||
# get the test name, and append the GPU type back to it.
|
||||
test_name=$(echo "$params" | jq -r '.test_name')
|
||||
|
||||
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||
echo "Skip test case $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
# append trt to the test name
|
||||
test_name=trt_$test_name
|
||||
|
||||
# get common parameters
|
||||
common_params=$(echo "$params" | jq -r '.common_parameters')
|
||||
model=$(echo "$common_params" | jq -r '.model')
|
||||
tp=$(echo "$common_params" | jq -r '.tp')
|
||||
dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
|
||||
dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
|
||||
port=$(echo "$common_params" | jq -r '.port')
|
||||
num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
|
||||
|
||||
# get client and server arguments
|
||||
server_params=$(echo "$params" | jq -r '.trt_server_parameters')
|
||||
client_params=$(echo "$params" | jq -r '.trt_client_parameters')
|
||||
client_args=$(json2args "$client_params")
|
||||
qps_list=$(echo "$params" | jq -r '.qps_list')
|
||||
qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
|
||||
echo "Running over qps list $qps_list"
|
||||
|
||||
# check if there is enough GPU to run the test
|
||||
if [[ $gpu_count -lt $tp ]]; then
|
||||
echo "Required model_tp_size $tp but only $gpu_count GPU found. Skip testcase $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
|
||||
|
||||
cd $VLLM_SOURCE_CODE_LOC/benchmarks
|
||||
|
||||
|
||||
echo "Running test case $test_name"
|
||||
bash ../.buildkite/nightly-benchmarks/scripts/launch-trt-server.sh "$server_params" "$common_params"
|
||||
|
||||
# wait until the server is alive
|
||||
wait_for_server
|
||||
if [ $? -eq 0 ]; then
|
||||
echo ""
|
||||
echo "trt server is up and running."
|
||||
else
|
||||
echo ""
|
||||
echo "trt failed to start within the timeout period."
|
||||
break
|
||||
fi
|
||||
|
||||
# prepare tokenizer
|
||||
cd $VLLM_SOURCE_CODE_LOC/benchmarks
|
||||
rm -rf /tokenizer_cache
|
||||
mkdir /tokenizer_cache
|
||||
python ../.buildkite/nightly-benchmarks/scripts/download-tokenizer.py \
|
||||
--model "$model" \
|
||||
--cachedir /tokenizer_cache
|
||||
cd $VLLM_SOURCE_CODE_LOC/benchmarks
|
||||
|
||||
|
||||
# iterate over different QPS
|
||||
for qps in $qps_list; do
|
||||
# remove the surrounding single quote from qps
|
||||
if [[ "$qps" == *"inf"* ]]; then
|
||||
echo "qps was $qps"
|
||||
qps="inf"
|
||||
echo "now qps is $qps"
|
||||
fi
|
||||
|
||||
new_test_name=$test_name"_qps_"$qps
|
||||
|
||||
client_command="python3 benchmark_serving.py \
|
||||
--backend tensorrt-llm \
|
||||
--tokenizer /tokenizer_cache \
|
||||
--model $model \
|
||||
--dataset-name $dataset_name \
|
||||
--dataset-path $dataset_path \
|
||||
--num-prompts $num_prompts \
|
||||
--port $port \
|
||||
--save-result \
|
||||
--result-dir $RESULTS_FOLDER \
|
||||
--result-filename ${new_test_name}.json \
|
||||
--request-rate $qps \
|
||||
$client_args"
|
||||
|
||||
echo "Running test case $test_name with qps $qps"
|
||||
echo "Client command: $client_command"
|
||||
|
||||
eval "$client_command"
|
||||
|
||||
server_command=""
|
||||
# record the benchmarking commands
|
||||
jq_output=$(jq -n \
|
||||
--arg server "$server_command" \
|
||||
--arg client "$client_command" \
|
||||
--arg gpu "$gpu_type" \
|
||||
--arg engine "trt" \
|
||||
'{
|
||||
server_command: $server,
|
||||
client_command: $client,
|
||||
gpu_type: $gpu,
|
||||
engine: $engine
|
||||
}')
|
||||
echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"
|
||||
|
||||
done
|
||||
|
||||
# clean up
|
||||
kill_gpu_processes
|
||||
rm -rf /root/.cache/huggingface/*
|
||||
done
|
||||
}
|
||||
|
||||
upload_to_buildkite() {
|
||||
# upload the benchmarking results to buildkite
|
||||
|
||||
# if the agent binary is not found, skip uploading the results, exit 0
|
||||
if [ ! -f /workspace/buildkite-agent ]; then
|
||||
echo "buildkite-agent binary not found. Skip uploading the results."
|
||||
return 0
|
||||
fi
|
||||
# /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md
|
||||
/workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*"
|
||||
}
|
||||
|
||||
|
||||
main() {
|
||||
|
||||
check_gpus
|
||||
|
||||
|
||||
# enter vllm directory
|
||||
cd $VLLM_SOURCE_CODE_LOC/benchmarks
|
||||
|
||||
declare -g RESULTS_FOLDER=results/
|
||||
mkdir -p $RESULTS_FOLDER
|
||||
BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
|
||||
|
||||
# update transformers package, to make sure mixtral tokenizer is available
|
||||
python -m pip install transformers -U
|
||||
|
||||
export CURRENT_LLM_SERVING_ENGINE=trt
|
||||
run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json
|
||||
python -m pip install tabulate pandas
|
||||
python $BENCHMARK_ROOT/scripts/summary-nightly-results.py
|
||||
upload_to_buildkite
|
||||
|
||||
}
|
||||
|
||||
main "$@"
|
221
.buildkite/nightly-benchmarks/scripts/run-vllm-nightly.sh
Normal file
221
.buildkite/nightly-benchmarks/scripts/run-vllm-nightly.sh
Normal file
@ -0,0 +1,221 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o pipefail
|
||||
|
||||
check_gpus() {
|
||||
# check the number of GPUs and GPU type.
|
||||
declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
|
||||
if [[ $gpu_count -gt 0 ]]; then
|
||||
echo "GPU found."
|
||||
else
|
||||
echo "Need at least 1 GPU to run benchmarking."
|
||||
exit 1
|
||||
fi
|
||||
declare -g gpu_type=$(echo $(nvidia-smi --query-gpu=name --format=csv,noheader) | awk '{print $2}')
|
||||
echo "GPU type is $gpu_type"
|
||||
}
|
||||
|
||||
kill_gpu_processes() {
|
||||
# kill all processes on GPU.
|
||||
pkill pt_main_thread
|
||||
sleep 10
|
||||
|
||||
# remove vllm config file
|
||||
rm -rf ~/.config/vllm
|
||||
|
||||
# Print the GPU memory usage
|
||||
# so that we know if all GPU processes are killed.
|
||||
gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0)
|
||||
# The memory usage should be 0 MB.
|
||||
echo "GPU 0 Memory Usage: $gpu_memory_usage MB"
|
||||
}
|
||||
|
||||
json2args() {
|
||||
# transforms the JSON string to command line args, and '_' is replaced to '-'
|
||||
# example:
|
||||
# input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
|
||||
# output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
|
||||
local json_string=$1
|
||||
local args=$(
|
||||
echo "$json_string" | jq -r '
|
||||
to_entries |
|
||||
map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
|
||||
join(" ")
|
||||
'
|
||||
)
|
||||
echo "$args"
|
||||
}
|
||||
|
||||
wait_for_server() {
|
||||
# wait for vllm server to start
|
||||
# return 1 if vllm server crashes
|
||||
timeout 1200 bash -c '
|
||||
until curl -s localhost:8000/v1/completions > /dev/null; do
|
||||
sleep 1
|
||||
done' && return 0 || return 1
|
||||
}
|
||||
|
||||
run_serving_tests() {
|
||||
# run serving tests using `benchmark_serving.py`
|
||||
# $1: a json file specifying serving test cases
|
||||
|
||||
local serving_test_file
|
||||
serving_test_file=$1
|
||||
|
||||
# Iterate over serving tests
|
||||
jq -c '.[]' "$serving_test_file" | while read -r params; do
|
||||
# get the test name, and append the GPU type back to it.
|
||||
test_name=$(echo "$params" | jq -r '.test_name')
|
||||
|
||||
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||
echo "Skip test case $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
# append vllm to the test name
|
||||
test_name=vllm_$test_name
|
||||
|
||||
|
||||
# get common parameters
|
||||
common_params=$(echo "$params" | jq -r '.common_parameters')
|
||||
model=$(echo "$common_params" | jq -r '.model')
|
||||
tp=$(echo "$common_params" | jq -r '.tp')
|
||||
dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
|
||||
dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
|
||||
port=$(echo "$common_params" | jq -r '.port')
|
||||
num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
|
||||
|
||||
# get client and server arguments
|
||||
server_params=$(echo "$params" | jq -r '.vllm_server_parameters')
|
||||
client_params=$(echo "$params" | jq -r '.vllm_client_parameters')
|
||||
server_args=$(json2args "$server_params")
|
||||
client_args=$(json2args "$client_params")
|
||||
qps_list=$(echo "$params" | jq -r '.qps_list')
|
||||
qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
|
||||
echo "Running over qps list $qps_list"
|
||||
|
||||
# check if there is enough GPU to run the test
|
||||
if [[ $gpu_count -lt $tp ]]; then
|
||||
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name."
|
||||
continue
|
||||
fi
|
||||
|
||||
if echo "$common_params" | jq -e 'has("fp8")' > /dev/null; then
|
||||
echo "Key 'fp8' exists in common params. Use neuralmagic fp8 model for convenience."
|
||||
model=$(echo "$common_params" | jq -r '.neuralmagic_quantized_model')
|
||||
server_command="python3 \
|
||||
-m vllm.entrypoints.openai.api_server \
|
||||
-tp $tp \
|
||||
--model $model \
|
||||
--port $port \
|
||||
$server_args"
|
||||
else
|
||||
echo "Key 'fp8' does not exist in common params."
|
||||
server_command="python3 \
|
||||
-m vllm.entrypoints.openai.api_server \
|
||||
-tp $tp \
|
||||
--model $model \
|
||||
--port $port \
|
||||
$server_args"
|
||||
fi
|
||||
|
||||
# run the server
|
||||
echo "Running test case $test_name"
|
||||
echo "Server command: $server_command"
|
||||
eval "$server_command" &
|
||||
|
||||
# wait until the server is alive
|
||||
wait_for_server
|
||||
if [ $? -eq 0 ]; then
|
||||
echo ""
|
||||
echo "vllm server is up and running."
|
||||
else
|
||||
echo ""
|
||||
echo "vllm failed to start within the timeout period."
|
||||
break
|
||||
fi
|
||||
|
||||
# iterate over different QPS
|
||||
for qps in $qps_list; do
|
||||
# remove the surrounding single quote from qps
|
||||
if [[ "$qps" == *"inf"* ]]; then
|
||||
echo "qps was $qps"
|
||||
qps="inf"
|
||||
echo "now qps is $qps"
|
||||
fi
|
||||
|
||||
new_test_name=$test_name"_qps_"$qps
|
||||
|
||||
client_command="python3 benchmark_serving.py \
|
||||
--backend vllm \
|
||||
--model $model \
|
||||
--dataset-name $dataset_name \
|
||||
--dataset-path $dataset_path \
|
||||
--num-prompts $num_prompts \
|
||||
--port $port \
|
||||
--save-result \
|
||||
--result-dir $RESULTS_FOLDER \
|
||||
--result-filename ${new_test_name}.json \
|
||||
--request-rate $qps \
|
||||
$client_args"
|
||||
|
||||
echo "Running test case $test_name with qps $qps"
|
||||
echo "Client command: $client_command"
|
||||
|
||||
eval "$client_command"
|
||||
|
||||
# record the benchmarking commands
|
||||
jq_output=$(jq -n \
|
||||
--arg server "$server_command" \
|
||||
--arg client "$client_command" \
|
||||
--arg gpu "$gpu_type" \
|
||||
--arg engine "vllm" \
|
||||
'{
|
||||
server_command: $server,
|
||||
client_command: $client,
|
||||
gpu_type: $gpu,
|
||||
engine: $engine
|
||||
}')
|
||||
echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"
|
||||
|
||||
done
|
||||
|
||||
# clean up
|
||||
kill_gpu_processes
|
||||
rm -rf /root/.cache/huggingface/*
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
upload_to_buildkite() {
|
||||
# upload the benchmarking results to buildkite
|
||||
|
||||
# if the agent binary is not found, skip uploading the results, exit 0
|
||||
if [ ! -f /workspace/buildkite-agent ]; then
|
||||
echo "buildkite-agent binary not found. Skip uploading the results."
|
||||
return 0
|
||||
fi
|
||||
# /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md
|
||||
/workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*"
|
||||
}
|
||||
|
||||
main() {
|
||||
|
||||
check_gpus
|
||||
# enter vllm directory
|
||||
cd $VLLM_SOURCE_CODE_LOC/benchmarks
|
||||
declare -g RESULTS_FOLDER=results/
|
||||
mkdir -p $RESULTS_FOLDER
|
||||
BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
|
||||
|
||||
export CURRENT_LLM_SERVING_ENGINE=vllm
|
||||
run_serving_tests $BENCHMARK_ROOT/tests/nightly-tests.json
|
||||
|
||||
python3 -m pip install tabulate pandas
|
||||
python3 $BENCHMARK_ROOT/scripts/summary-nightly-results.py
|
||||
upload_to_buildkite
|
||||
|
||||
}
|
||||
|
||||
main "$@"
|
@ -0,0 +1,76 @@
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
|
||||
results_folder = Path("results/")
|
||||
|
||||
# serving results and the keys that will be printed into markdown
|
||||
serving_results = []
|
||||
serving_column_mapping = {
|
||||
"test_name": "Test name",
|
||||
"gpu_type": "GPU",
|
||||
"completed": "Successful req.",
|
||||
"request_throughput": "Tput (req/s)",
|
||||
"mean_ttft_ms": "Mean TTFT (ms)",
|
||||
"std_ttft_ms": "Std TTFT (ms)",
|
||||
"mean_itl_ms": "Mean ITL (ms)",
|
||||
"std_itl_ms": "Std ITL (ms)",
|
||||
"input_throughput": "Input Tput (tok/s)",
|
||||
"output_throughput": "Output Tput (tok/s)",
|
||||
"engine": "Engine",
|
||||
}
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
# collect results
|
||||
for test_file in results_folder.glob("*.json"):
|
||||
|
||||
with open(test_file, "r") as f:
|
||||
raw_result = json.loads(f.read())
|
||||
|
||||
# attach the benchmarking command to raw_result
|
||||
with open(test_file.with_suffix(".commands"), "r") as f:
|
||||
command = json.loads(f.read())
|
||||
raw_result.update(command)
|
||||
|
||||
# update the test name of this result
|
||||
raw_result.update({"test_name": test_file.stem})
|
||||
|
||||
# add the result to raw_result
|
||||
serving_results.append(raw_result)
|
||||
continue
|
||||
|
||||
serving_results = pd.DataFrame.from_dict(serving_results)
|
||||
|
||||
if not serving_results.empty:
|
||||
serving_results = serving_results[list(
|
||||
serving_column_mapping.keys())].rename(
|
||||
columns=serving_column_mapping)
|
||||
|
||||
serving_md_table_with_headers = tabulate(serving_results,
|
||||
headers='keys',
|
||||
tablefmt='pipe',
|
||||
showindex=False)
|
||||
# remove the first line of header
|
||||
serving_md_table_lines = serving_md_table_with_headers.split('\n')
|
||||
serving_md_table_without_header = '\n'.join(serving_md_table_lines[2:])
|
||||
|
||||
prefix = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
|
||||
prefix = prefix + "_" + os.environ.get("CURRENT_LLM_SERVING_ENGINE")
|
||||
|
||||
# document benchmarking results in markdown
|
||||
with open(results_folder / f"{prefix}_nightly_results.md", "w") as f:
|
||||
# document results with header.
|
||||
# for those who wants to reproduce our benchmark.
|
||||
f.write(serving_md_table_with_headers)
|
||||
f.write('\n')
|
||||
|
||||
# document benchmarking results in json
|
||||
with open(results_folder / f"{prefix}_nightly_results.json", "w") as f:
|
||||
|
||||
results = serving_results.to_dict(orient='records')
|
||||
f.write(json.dumps(results))
|
17
.buildkite/nightly-benchmarks/scripts/wait-for-image.sh
Normal file
17
.buildkite/nightly-benchmarks/scripts/wait-for-image.sh
Normal file
@ -0,0 +1,17 @@
|
||||
#!/bin/sh
|
||||
TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-test-repo:pull" | jq -r .token)
|
||||
URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-test-repo/manifests/$BUILDKITE_COMMIT"
|
||||
|
||||
retries=0
|
||||
while [ $retries -lt 1000 ]; do
|
||||
if [ $(curl -s -L -H "Authorization: Bearer $TOKEN" -o /dev/null -w "%{http_code}" $URL) -eq 200 ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Waiting for image to be available..."
|
||||
|
||||
retries=$((retries + 1))
|
||||
sleep 5
|
||||
done
|
||||
|
||||
exit 1
|
67
.buildkite/nightly-benchmarks/tests/descriptions.md
Normal file
67
.buildkite/nightly-benchmarks/tests/descriptions.md
Normal file
@ -0,0 +1,67 @@
|
||||
|
||||
## Latency tests
|
||||
|
||||
This test suite aims to test vllm's end-to-end latency under a controlled setup.
|
||||
|
||||
- Input length: 32 tokens.
|
||||
- Output length: 128 tokens.
|
||||
- Batch size: fixed (8).
|
||||
- Models: llama-3 8B, llama-3 70B, mixtral 8x7B.
|
||||
- Evaluation metrics: end-to-end latency (mean, median, p99).
|
||||
|
||||
### Latency benchmarking results
|
||||
|
||||
{latency_tests_markdown_table}
|
||||
|
||||
## Throughput tests
|
||||
|
||||
This test suite aims to test vllm's throughput.
|
||||
|
||||
- Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
|
||||
- Output length: the corresponding output length of these 200 prompts.
|
||||
- Batch size: dynamically determined by vllm to achieve maximum throughput.
|
||||
- Models: llama-3 8B, llama-3 70B, mixtral 8x7B.
|
||||
- Evaluation metrics: throughput.
|
||||
|
||||
### Throughput benchmarking results
|
||||
|
||||
{throughput_tests_markdown_table}
|
||||
|
||||
## Serving tests
|
||||
|
||||
This test suite aims to test vllm's real serving metrics.
|
||||
|
||||
- Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
|
||||
- Output length: the corresponding output length of these 200 prompts.
|
||||
- Batch size: dynamically determined by vllm and the arrival pattern of the requests.
|
||||
- **Average QPS (query per second)**: 1, 4, 16 and inf. QPS = inf means all requests come at once. For other QPS values, the arrival time of each query is determined using a random Poisson process (with fixed random seed).
|
||||
- Models: llama-3 8B, llama-3 70B, mixtral 8x7B.
|
||||
- Evaluation metrics: throughput, TTFT (time to the first token, with mean, median and p99), ITL (inter-token latency, with mean, median and p99).
|
||||
|
||||
### Serving benchmarking results
|
||||
|
||||
{serving_tests_markdown_table}
|
||||
|
||||
## json version of the benchmarking tables
|
||||
|
||||
This section contains the data of the markdown tables above in JSON format.
|
||||
You can load the benchmarking tables into pandas dataframes as follows:
|
||||
|
||||
```python
|
||||
import json
|
||||
import pandas as pd
|
||||
|
||||
benchmarking_results_json = """The json string"""
|
||||
benchmarking_results = json.loads(benchmarking_results_json)
|
||||
latency_results = pd.DataFrame.from_dict(benchmarking_results["latency"])
|
||||
throughput_results = pd.DataFrame.from_dict(benchmarking_results["throughput"])
|
||||
serving_results = pd.DataFrame.from_dict(benchmarking_results["serving"])
|
||||
```
|
||||
|
||||
The json string for all benchmarking tables:
|
||||
```json
|
||||
{benchmarking_results_in_json_string}
|
||||
```
|
||||
|
||||
You can also check the raw experiment data in the Artifact tab of the Buildkite page.
|
||||
|
32
.buildkite/nightly-benchmarks/tests/latency-tests.json
Normal file
32
.buildkite/nightly-benchmarks/tests/latency-tests.json
Normal file
@ -0,0 +1,32 @@
|
||||
[
|
||||
{
|
||||
"test_name": "latency_llama8B_tp1",
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-8B",
|
||||
"tensor_parallel_size": 1,
|
||||
"load_format": "dummy",
|
||||
"num_iters_warmup": 5,
|
||||
"num_iters": 15
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "latency_llama70B_tp4",
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-70B-Instruct",
|
||||
"tensor_parallel_size": 4,
|
||||
"load_format": "dummy",
|
||||
"num-iters-warmup": 5,
|
||||
"num-iters": 15
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "latency_mixtral8x7B_tp2",
|
||||
"parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"tensor_parallel_size": 2,
|
||||
"load_format": "dummy",
|
||||
"num-iters-warmup": 5,
|
||||
"num-iters": 15
|
||||
}
|
||||
}
|
||||
]
|
116
.buildkite/nightly-benchmarks/tests/nightly-tests.json
Normal file
116
.buildkite/nightly-benchmarks/tests/nightly-tests.json
Normal file
@ -0,0 +1,116 @@
|
||||
[
|
||||
{
|
||||
"test_name": "llama8B_tp1",
|
||||
"qps_list": [4],
|
||||
"common_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-8B",
|
||||
"tp": 1,
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 500,
|
||||
"port": 8000
|
||||
},
|
||||
"lmdeploy_server_parameters": {
|
||||
},
|
||||
"lmdeploy_client_parameters": {
|
||||
},
|
||||
"tgi_server_parameters": {
|
||||
},
|
||||
"tgi_client_parameters": {
|
||||
"endpoint": "/generate_stream"
|
||||
},
|
||||
"trt_server_parameters": {
|
||||
"model_type": "llama",
|
||||
"model_dtype": "float16",
|
||||
"max_batch_size": 256,
|
||||
"max_input_len": 4096,
|
||||
"max_output_len": 4096,
|
||||
"trt_llm_version": "r24.04"
|
||||
},
|
||||
"trt_client_parameters": {
|
||||
"endpoint": "/v2/models/ensemble/generate_stream"
|
||||
},
|
||||
"vllm_server_parameters": {
|
||||
"disable_log_stats": "",
|
||||
"disable_log_requests": ""
|
||||
},
|
||||
"vllm_client_parameters": {
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "llama70B_tp4",
|
||||
"qps_list": [2],
|
||||
"common_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-70B-Instruct",
|
||||
"tp": 4,
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 500,
|
||||
"port": 8000
|
||||
},
|
||||
"lmdeploy_server_parameters": {
|
||||
},
|
||||
"lmdeploy_client_parameters": {
|
||||
},
|
||||
"tgi_server_parameters": {
|
||||
},
|
||||
"tgi_client_parameters": {
|
||||
"endpoint": "/generate_stream"
|
||||
},
|
||||
"trt_server_parameters": {
|
||||
"model_type": "llama",
|
||||
"model_dtype": "float16",
|
||||
"max_batch_size": 256,
|
||||
"max_input_len": 4096,
|
||||
"max_output_len": 4096,
|
||||
"trt_llm_version": "r24.04"
|
||||
},
|
||||
"trt_client_parameters": {
|
||||
"endpoint": "/v2/models/ensemble/generate_stream"
|
||||
},
|
||||
"vllm_server_parameters": {
|
||||
"disable_log_stats": "",
|
||||
"disable_log_requests": ""
|
||||
},
|
||||
"vllm_client_parameters": {
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "mixtral8x7B_tp2",
|
||||
"qps_list": [2],
|
||||
"common_parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"tp": 2,
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 500,
|
||||
"port": 8000
|
||||
},
|
||||
"lmdeploy_server_parameters": {
|
||||
},
|
||||
"lmdeploy_client_parameters": {
|
||||
},
|
||||
"tgi_server_parameters": {
|
||||
},
|
||||
"tgi_client_parameters": {
|
||||
"endpoint": "/generate_stream"
|
||||
},
|
||||
"trt_server_parameters": {
|
||||
"model_type": "llama",
|
||||
"model_dtype": "float16",
|
||||
"max_batch_size": 256,
|
||||
"max_input_len": 4096,
|
||||
"max_output_len": 4096,
|
||||
"trt_llm_version": "r24.04"
|
||||
},
|
||||
"trt_client_parameters": {
|
||||
"endpoint": "/v2/models/ensemble/generate_stream"
|
||||
},
|
||||
"vllm_server_parameters": {
|
||||
"disable_log_stats": "",
|
||||
"disable_log_requests": ""
|
||||
},
|
||||
"vllm_client_parameters": {
|
||||
}
|
||||
}
|
||||
]
|
59
.buildkite/nightly-benchmarks/tests/serving-tests.json
Normal file
59
.buildkite/nightly-benchmarks/tests/serving-tests.json
Normal file
@ -0,0 +1,59 @@
|
||||
[
|
||||
{
|
||||
"test_name": "serving_llama8B_tp1_sharegpt",
|
||||
"qps_list": [1, 4, 16, "inf"],
|
||||
"server_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-8B",
|
||||
"tensor_parallel_size": 1,
|
||||
"swap_space": 16,
|
||||
"disable_log_stats": "",
|
||||
"disable_log_requests": "",
|
||||
"load_format": "dummy"
|
||||
},
|
||||
"client_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-8B",
|
||||
"backend": "vllm",
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "serving_llama70B_tp4_sharegpt",
|
||||
"qps_list": [1, 4, 16, "inf"],
|
||||
"server_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-70B-Instruct",
|
||||
"tensor_parallel_size": 4,
|
||||
"swap_space": 16,
|
||||
"disable_log_stats": "",
|
||||
"disable_log_requests": "",
|
||||
"load_format": "dummy"
|
||||
},
|
||||
"client_parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-70B-Instruct",
|
||||
"backend": "vllm",
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "serving_mixtral8x7B_tp2_sharegpt",
|
||||
"qps_list": [1, 4, 16, "inf"],
|
||||
"server_parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"tensor_parallel_size": 2,
|
||||
"swap_space": 16,
|
||||
"disable_log_stats": "",
|
||||
"disable_log_requests": "",
|
||||
"load_format": "dummy"
|
||||
},
|
||||
"client_parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"backend": "vllm",
|
||||
"dataset_name": "sharegpt",
|
||||
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200
|
||||
}
|
||||
}
|
||||
]
|
35
.buildkite/nightly-benchmarks/tests/throughput-tests.json
Normal file
35
.buildkite/nightly-benchmarks/tests/throughput-tests.json
Normal file
@ -0,0 +1,35 @@
|
||||
[
|
||||
{
|
||||
"test_name": "throughput_llama8B_tp1",
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-8B",
|
||||
"tensor_parallel_size": 1,
|
||||
"load_format": "dummy",
|
||||
"dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200,
|
||||
"backend": "vllm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "throughput_llama70B_tp4",
|
||||
"parameters": {
|
||||
"model": "meta-llama/Meta-Llama-3-70B-Instruct",
|
||||
"tensor_parallel_size": 4,
|
||||
"load_format": "dummy",
|
||||
"dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200,
|
||||
"backend": "vllm"
|
||||
}
|
||||
},
|
||||
{
|
||||
"test_name": "throughput_mixtral8x7B_tp2",
|
||||
"parameters": {
|
||||
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
||||
"tensor_parallel_size": 2,
|
||||
"load_format": "dummy",
|
||||
"dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||
"num_prompts": 200,
|
||||
"backend": "vllm"
|
||||
}
|
||||
}
|
||||
]
|
17
.buildkite/release-pipeline.yaml
Normal file
17
.buildkite/release-pipeline.yaml
Normal file
@ -0,0 +1,17 @@
|
||||
steps:
|
||||
- label: "Build wheel - CUDA {{matrix.cuda_version}}"
|
||||
agents:
|
||||
queue: cpu_queue
|
||||
commands:
|
||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg CUDA_VERSION={{matrix.cuda_version}} --tag vllm-ci:build-image --target build --progress plain ."
|
||||
- "mkdir artifacts"
|
||||
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
||||
# rename the files to change linux -> manylinux1
|
||||
- "for f in artifacts/dist/*.whl; do mv -- \"$$f\" \"$${f/linux/manylinux1}\"; done"
|
||||
- "aws s3 cp --recursive artifacts/dist s3://vllm-wheels/$BUILDKITE_COMMIT/"
|
||||
- "aws s3 cp --recursive artifacts/dist s3://vllm-wheels/nightly/"
|
||||
matrix:
|
||||
setup:
|
||||
cuda_version:
|
||||
- "11.8.0"
|
||||
- "12.1.0"
|
77
.buildkite/run-amd-test.sh
Normal file
77
.buildkite/run-amd-test.sh
Normal file
@ -0,0 +1,77 @@
|
||||
# This script runs test inside the corresponding ROCm docker container.
|
||||
set -ex
|
||||
|
||||
# Print ROCm version
|
||||
echo "--- Confirming Clean Initial State"
|
||||
while true; do
|
||||
sleep 3
|
||||
if grep -q clean /opt/amdgpu/etc/gpu_state; then
|
||||
echo "GPUs state is \"clean\""
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
echo "--- ROCm info"
|
||||
rocminfo
|
||||
|
||||
# cleanup older docker images
|
||||
cleanup_docker() {
|
||||
# Get Docker's root directory
|
||||
docker_root=$(docker info -f '{{.DockerRootDir}}')
|
||||
if [ -z "$docker_root" ]; then
|
||||
echo "Failed to determine Docker root directory."
|
||||
exit 1
|
||||
fi
|
||||
echo "Docker root directory: $docker_root"
|
||||
# Check disk usage of the filesystem where Docker's root directory is located
|
||||
disk_usage=$(df "$docker_root" | tail -1 | awk '{print $5}' | sed 's/%//')
|
||||
# Define the threshold
|
||||
threshold=70
|
||||
if [ "$disk_usage" -gt "$threshold" ]; then
|
||||
echo "Disk usage is above $threshold%. Cleaning up Docker images and volumes..."
|
||||
# Remove dangling images (those that are not tagged and not used by any container)
|
||||
docker image prune -f
|
||||
# Remove unused volumes
|
||||
docker volume prune -f
|
||||
echo "Docker images and volumes cleanup completed."
|
||||
else
|
||||
echo "Disk usage is below $threshold%. No cleanup needed."
|
||||
fi
|
||||
}
|
||||
|
||||
# Call the cleanup docker function
|
||||
cleanup_docker
|
||||
|
||||
echo "--- Resetting GPUs"
|
||||
|
||||
echo "reset" > /opt/amdgpu/etc/gpu_state
|
||||
|
||||
while true; do
|
||||
sleep 3
|
||||
if grep -q clean /opt/amdgpu/etc/gpu_state; then
|
||||
echo "GPUs state is \"clean\""
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
echo "--- Pulling container"
|
||||
image_name="rocmshared/vllm-ci:${BUILDKITE_COMMIT}"
|
||||
container_name="rocm_${BUILDKITE_COMMIT}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"
|
||||
docker pull ${image_name}
|
||||
|
||||
remove_docker_container() {
|
||||
docker rm -f ${container_name} || docker image rm -f ${image_name} || true
|
||||
}
|
||||
trap remove_docker_container EXIT
|
||||
|
||||
echo "--- Running container"
|
||||
|
||||
docker run \
|
||||
--device /dev/kfd --device /dev/dri \
|
||||
--network host \
|
||||
--rm \
|
||||
-e HF_TOKEN \
|
||||
--name ${container_name} \
|
||||
${image_name} \
|
||||
/bin/bash -c "${@}"
|
||||
|
78
.buildkite/run-benchmarks.sh
Normal file
78
.buildkite/run-benchmarks.sh
Normal file
@ -0,0 +1,78 @@
|
||||
# This script is run by buildkite to run the benchmarks and upload the results to buildkite
|
||||
|
||||
set -ex
|
||||
set -o pipefail
|
||||
|
||||
# cd into parent directory of this file
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")/.."
|
||||
|
||||
(which wget && which curl) || (apt-get update && apt-get install -y wget curl)
|
||||
|
||||
# run python-based benchmarks and upload the result to buildkite
|
||||
python3 benchmarks/benchmark_latency.py --output-json latency_results.json 2>&1 | tee benchmark_latency.txt
|
||||
bench_latency_exit_code=$?
|
||||
|
||||
python3 benchmarks/benchmark_throughput.py --input-len 256 --output-len 256 --output-json throughput_results.json 2>&1 | tee benchmark_throughput.txt
|
||||
bench_throughput_exit_code=$?
|
||||
|
||||
# run server-based benchmarks and upload the result to buildkite
|
||||
python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-2-7b-chat-hf &
|
||||
server_pid=$!
|
||||
wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
|
||||
|
||||
# wait for server to start, timeout after 600 seconds
|
||||
timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
|
||||
python3 benchmarks/benchmark_serving.py \
|
||||
--backend vllm \
|
||||
--dataset-name sharegpt \
|
||||
--dataset-path ./ShareGPT_V3_unfiltered_cleaned_split.json \
|
||||
--model meta-llama/Llama-2-7b-chat-hf \
|
||||
--num-prompts 20 \
|
||||
--endpoint /v1/completions \
|
||||
--tokenizer meta-llama/Llama-2-7b-chat-hf \
|
||||
--save-result \
|
||||
2>&1 | tee benchmark_serving.txt
|
||||
bench_serving_exit_code=$?
|
||||
kill $server_pid
|
||||
|
||||
# write the results into a markdown file
|
||||
echo "### Latency Benchmarks" >> benchmark_results.md
|
||||
sed -n '1p' benchmark_latency.txt >> benchmark_results.md # first line
|
||||
echo "" >> benchmark_results.md
|
||||
sed -n '$p' benchmark_latency.txt >> benchmark_results.md # last line
|
||||
|
||||
echo "### Throughput Benchmarks" >> benchmark_results.md
|
||||
sed -n '1p' benchmark_throughput.txt >> benchmark_results.md # first line
|
||||
echo "" >> benchmark_results.md
|
||||
sed -n '$p' benchmark_throughput.txt >> benchmark_results.md # last line
|
||||
|
||||
echo "### Serving Benchmarks" >> benchmark_results.md
|
||||
sed -n '1p' benchmark_serving.txt >> benchmark_results.md # first line
|
||||
echo "" >> benchmark_results.md
|
||||
echo '```' >> benchmark_results.md
|
||||
tail -n 24 benchmark_serving.txt >> benchmark_results.md # last 24 lines
|
||||
echo '```' >> benchmark_results.md
|
||||
|
||||
# if the agent binary is not found, skip uploading the results, exit 0
|
||||
if [ ! -f /usr/bin/buildkite-agent ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# upload the results to buildkite
|
||||
buildkite-agent annotate --style "info" --context "benchmark-results" < benchmark_results.md
|
||||
|
||||
# exit with the exit code of the benchmarks
|
||||
if [ $bench_latency_exit_code -ne 0 ]; then
|
||||
exit $bench_latency_exit_code
|
||||
fi
|
||||
|
||||
if [ $bench_throughput_exit_code -ne 0 ]; then
|
||||
exit $bench_throughput_exit_code
|
||||
fi
|
||||
|
||||
if [ $bench_serving_exit_code -ne 0 ]; then
|
||||
exit $bench_serving_exit_code
|
||||
fi
|
||||
|
||||
rm ShareGPT_V3_unfiltered_cleaned_split.json
|
||||
buildkite-agent artifact upload "*.json"
|
28
.buildkite/run-cpu-test.sh
Normal file
28
.buildkite/run-cpu-test.sh
Normal file
@ -0,0 +1,28 @@
|
||||
# This script build the CPU docker image and run the offline inference inside the container.
|
||||
# It serves a sanity check for compilation and basic model usage.
|
||||
set -ex
|
||||
|
||||
# Try building the docker image
|
||||
docker build -t cpu-test -f Dockerfile.cpu .
|
||||
docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-avx2 -f Dockerfile.cpu .
|
||||
|
||||
# Setup cleanup
|
||||
remove_docker_container() { docker rm -f cpu-test cpu-test-avx2 || true; }
|
||||
trap remove_docker_container EXIT
|
||||
remove_docker_container
|
||||
|
||||
# Run the image
|
||||
docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \
|
||||
--cpuset-mems=1 --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test cpu-test
|
||||
docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus=48-95 \
|
||||
--cpuset-mems=1 --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --name cpu-test-avx2 cpu-test-avx2
|
||||
|
||||
# offline inference
|
||||
docker exec cpu-test bash -c "python3 examples/offline_inference.py"
|
||||
docker exec cpu-test-avx2 bash -c "python3 examples/offline_inference.py"
|
||||
|
||||
# Run basic model test
|
||||
docker exec cpu-test bash -c "cd tests;
|
||||
pip install pytest Pillow protobuf
|
||||
cd ../
|
||||
pytest -v -s tests/models -m \"not vlm\" --ignore=tests/models/test_embedding.py --ignore=tests/models/test_registry.py --ignore=tests/models/test_jamba.py" # Mamba on CPU is not supported
|
105
.buildkite/run-multi-node-test.sh
Executable file
105
.buildkite/run-multi-node-test.sh
Executable file
@ -0,0 +1,105 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euox pipefail
|
||||
|
||||
if [[ $# -lt 4 ]]; then
|
||||
echo "Usage: .buildkite/run-multi-node-test.sh WORKING_DIR NUM_NODES NUM_GPUS DOCKER_IMAGE COMMAND1 COMMAND2 ... COMMANDN"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
WORKING_DIR=$1
|
||||
NUM_NODES=$2
|
||||
NUM_GPUS=$3
|
||||
DOCKER_IMAGE=$4
|
||||
|
||||
shift 4
|
||||
COMMANDS=("$@")
|
||||
if [ ${#COMMANDS[@]} -ne $NUM_NODES ]; then
|
||||
echo "The number of commands must be equal to the number of nodes."
|
||||
echo "Number of nodes: $NUM_NODES"
|
||||
echo "Number of commands: ${#COMMANDS[@]}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "List of commands"
|
||||
for command in "${COMMANDS[@]}"; do
|
||||
echo $command
|
||||
done
|
||||
|
||||
start_network() {
|
||||
docker network create --subnet=192.168.10.0/24 docker-net
|
||||
}
|
||||
|
||||
start_nodes() {
|
||||
for node in $(seq 0 $(($NUM_NODES-1))); do
|
||||
GPU_DEVICES='"device='
|
||||
for node_gpu in $(seq 0 $(($NUM_GPUS - 1))); do
|
||||
DEVICE_NUM=$(($node * $NUM_GPUS + $node_gpu))
|
||||
GPU_DEVICES+=$(($DEVICE_NUM))
|
||||
if [ $node_gpu -lt $(($NUM_GPUS - 1)) ]; then
|
||||
GPU_DEVICES+=','
|
||||
fi
|
||||
done
|
||||
GPU_DEVICES+='"'
|
||||
|
||||
# start the container in detached mode
|
||||
# things to note:
|
||||
# 1. --shm-size=10.24gb is required. don't use --ipc=host
|
||||
# 2. pass HF_TOKEN to the container
|
||||
# 3. map the huggingface cache directory to the container
|
||||
# 3. assign ip addresses to the containers (head node: 192.168.10.10, worker nodes:
|
||||
# starting from 192.168.10.11)
|
||||
docker run -d --gpus "$GPU_DEVICES" --shm-size=10.24gb -e HF_TOKEN -v ~/.cache/huggingface:/root/.cache/huggingface --name node$node --network docker-net --ip 192.168.10.$((10 + $node)) --rm $DOCKER_IMAGE /bin/bash -c "tail -f /dev/null"
|
||||
|
||||
# organize containers into a ray cluster
|
||||
if [ $node -eq 0 ]; then
|
||||
# start the ray head node
|
||||
docker exec -d node$node /bin/bash -c "ray start --head --port=6379 --block"
|
||||
# wait for the head node to be ready
|
||||
sleep 10
|
||||
else
|
||||
# start the ray worker nodes, and connect them to the head node
|
||||
docker exec -d node$node /bin/bash -c "ray start --address=192.168.10.10:6379 --block"
|
||||
fi
|
||||
done
|
||||
|
||||
# wait for the cluster to be ready
|
||||
sleep 10
|
||||
|
||||
# print the cluster status
|
||||
docker exec node0 /bin/bash -c "ray status"
|
||||
}
|
||||
|
||||
run_nodes() {
|
||||
# important: iterate in reverse order to start the head node last
|
||||
# we start the worker nodes first, in detached mode, and then start the head node
|
||||
# in the foreground, so that the output of the head node is visible in the buildkite logs
|
||||
for node in $(seq $(($NUM_NODES - 1)) -1 0); do
|
||||
GPU_DEVICES='"device='
|
||||
for node_gpu in $(seq 0 $(($NUM_GPUS - 1))); do
|
||||
DEVICE_NUM=$(($node * $NUM_GPUS + $node_gpu))
|
||||
GPU_DEVICES+=$(($DEVICE_NUM))
|
||||
if [ $node_gpu -lt $(($NUM_GPUS - 1)) ]; then
|
||||
GPU_DEVICES+=','
|
||||
fi
|
||||
done
|
||||
GPU_DEVICES+='"'
|
||||
echo "Running node$node with GPU devices: $GPU_DEVICES"
|
||||
if [ $node -ne 0 ]; then
|
||||
docker exec -d node$node /bin/bash -c "cd $WORKING_DIR ; ${COMMANDS[$node]}"
|
||||
else
|
||||
docker exec node$node /bin/bash -c "cd $WORKING_DIR ; ${COMMANDS[$node]}"
|
||||
fi
|
||||
done
|
||||
}
|
||||
cleanup() {
|
||||
for node in $(seq 0 $(($NUM_NODES-1))); do
|
||||
docker stop node$node
|
||||
done
|
||||
docker network rm docker-net
|
||||
}
|
||||
trap cleanup EXIT
|
||||
start_network
|
||||
start_nodes
|
||||
run_nodes
|
||||
|
51
.buildkite/run-neuron-test.sh
Normal file
51
.buildkite/run-neuron-test.sh
Normal file
@ -0,0 +1,51 @@
|
||||
# This script build the Neuron docker image and run the API server inside the container.
|
||||
# It serves a sanity check for compilation and basic model usage.
|
||||
set -e
|
||||
|
||||
# Try building the docker image
|
||||
aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 763104351884.dkr.ecr.us-west-2.amazonaws.com
|
||||
|
||||
# prune old image and containers to save disk space, and only once a day
|
||||
# by using a timestamp file in tmp.
|
||||
if [ -f /tmp/neuron-docker-build-timestamp ]; then
|
||||
last_build=$(cat /tmp/neuron-docker-build-timestamp)
|
||||
current_time=$(date +%s)
|
||||
if [ $((current_time - last_build)) -gt 86400 ]; then
|
||||
docker system prune -f
|
||||
echo $current_time > /tmp/neuron-docker-build-timestamp
|
||||
fi
|
||||
else
|
||||
echo $(date +%s) > /tmp/neuron-docker-build-timestamp
|
||||
fi
|
||||
|
||||
docker build -t neuron -f Dockerfile.neuron .
|
||||
|
||||
# Setup cleanup
|
||||
remove_docker_container() { docker rm -f neuron || true; }
|
||||
trap remove_docker_container EXIT
|
||||
remove_docker_container
|
||||
|
||||
# Run the image
|
||||
docker run --device=/dev/neuron0 --device=/dev/neuron1 --network host --name neuron neuron python3 -m vllm.entrypoints.api_server \
|
||||
--model TinyLlama/TinyLlama-1.1B-Chat-v1.0 --max-num-seqs 8 --max-model-len 128 --block-size 128 --device neuron --tensor-parallel-size 2 &
|
||||
|
||||
# Wait for the server to start
|
||||
wait_for_server_to_start() {
|
||||
timeout=300
|
||||
counter=0
|
||||
|
||||
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8000/health)" != "200" ]; do
|
||||
sleep 1
|
||||
counter=$((counter + 1))
|
||||
if [ $counter -ge $timeout ]; then
|
||||
echo "Timeout after $timeout seconds"
|
||||
break
|
||||
fi
|
||||
done
|
||||
}
|
||||
wait_for_server_to_start
|
||||
|
||||
# Test a simple prompt
|
||||
curl -X POST -H "Content-Type: application/json" \
|
||||
localhost:8000/generate \
|
||||
-d '{"prompt": "San Francisco is a"}'
|
14
.buildkite/run-openvino-test.sh
Executable file
14
.buildkite/run-openvino-test.sh
Executable file
@ -0,0 +1,14 @@
|
||||
# This script build the OpenVINO docker image and run the offline inference inside the container.
|
||||
# It serves a sanity check for compilation and basic model usage.
|
||||
set -ex
|
||||
|
||||
# Try building the docker image
|
||||
docker build -t openvino-test -f Dockerfile.openvino .
|
||||
|
||||
# Setup cleanup
|
||||
remove_docker_container() { docker rm -f openvino-test || true; }
|
||||
trap remove_docker_container EXIT
|
||||
remove_docker_container
|
||||
|
||||
# Run the image and launch offline inference
|
||||
docker run --network host --env VLLM_OPENVINO_KVCACHE_SPACE=1 --name openvino-test openvino-test python3 /workspace/vllm/examples/offline_inference.py
|
14
.buildkite/run-xpu-test.sh
Normal file
14
.buildkite/run-xpu-test.sh
Normal file
@ -0,0 +1,14 @@
|
||||
# This script build the CPU docker image and run the offline inference inside the container.
|
||||
# It serves a sanity check for compilation and basic model usage.
|
||||
set -ex
|
||||
|
||||
# Try building the docker image
|
||||
docker build -t xpu-test -f Dockerfile.xpu .
|
||||
|
||||
# Setup cleanup
|
||||
remove_docker_container() { docker rm -f xpu-test || true; }
|
||||
trap remove_docker_container EXIT
|
||||
remove_docker_container
|
||||
|
||||
# Run the image and launch offline inference
|
||||
docker run --network host --name xpu-test --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path xpu-test python3 examples/offline_inference.py
|
292
.buildkite/test-pipeline.yaml
Normal file
292
.buildkite/test-pipeline.yaml
Normal file
@ -0,0 +1,292 @@
|
||||
# In this file, you can add more tests to run either by adding a new step or
|
||||
# adding a new command to an existing step. See different options here for examples.
|
||||
|
||||
# This script will be feed into Jinja template in `test-template-aws.j2` at
|
||||
# https://github.com/vllm-project/buildkite-ci/blob/main/scripts/test-template-aws.j2
|
||||
# to generate the final pipeline yaml file.
|
||||
|
||||
|
||||
steps:
|
||||
- label: Async Engine, Inputs, Utils, Worker Test
|
||||
fast_check: true
|
||||
fast_check_only: true
|
||||
commands:
|
||||
- pytest -v -s async_engine # Async Engine
|
||||
- bash ../.buildkite/download-images.sh # Inputs
|
||||
- pytest -v -s test_inputs.py
|
||||
- pytest -v -s multimodal
|
||||
- pytest -v -s test_utils.py # Utils
|
||||
- pytest -v -s worker # Worker
|
||||
|
||||
- label: Tensorizer, Metrics, Tracing Test
|
||||
fast_check: true
|
||||
fast_check_only: true
|
||||
commands:
|
||||
- apt-get install curl libsodium23 && pytest -v -s tensorizer_loader # Tensorizer
|
||||
- pytest -v -s metrics # Metrics
|
||||
- "pip install \
|
||||
opentelemetry-sdk \
|
||||
opentelemetry-api \
|
||||
opentelemetry-exporter-otlp \
|
||||
opentelemetry-semantic-conventions-ai" # Tracing
|
||||
- pytest -v -s tracing
|
||||
|
||||
- label: Regression Test
|
||||
mirror_hardwares: [amd]
|
||||
fast_check: true
|
||||
command: pytest -v -s test_regression.py
|
||||
working_dir: "/vllm-workspace/tests" # optional
|
||||
|
||||
- label: AsyncEngine Test
|
||||
#mirror_hardwares: [amd]
|
||||
command: pytest -v -s async_engine
|
||||
|
||||
- label: Basic Correctness Test
|
||||
mirror_hardwares: [amd]
|
||||
fast_check: true
|
||||
commands:
|
||||
- pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.0.8/flashinfer-0.0.8+cu121torch2.3-cp310-cp310-linux_x86_64.whl
|
||||
- pytest -v -s basic_correctness/test_basic_correctness.py
|
||||
- VLLM_ATTENTION_BACKEND=XFORMERS pytest -v -s basic_correctness/test_chunked_prefill.py
|
||||
- VLLM_ATTENTION_BACKEND=FLASH_ATTN pytest -v -s basic_correctness/test_chunked_prefill.py
|
||||
- VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest -v -s basic_correctness/test_preemption.py
|
||||
|
||||
- label: Core Test
|
||||
mirror_hardwares: [amd]
|
||||
fast_check: true
|
||||
commands:
|
||||
- pytest -v -s core
|
||||
- pytest -v -s distributed/test_parallel_state.py
|
||||
|
||||
- label: Distributed Comm Ops Test
|
||||
#mirror_hardwares: [amd]
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
num_gpus: 2
|
||||
commands:
|
||||
- pytest -v -s distributed/test_comm_ops.py
|
||||
- pytest -v -s distributed/test_shm_broadcast.py
|
||||
|
||||
- label: 2 Node Tests (4 GPUs in total)
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
num_gpus: 2
|
||||
num_nodes: 2
|
||||
commands:
|
||||
- # the following commands are for the first node, with ip 192.168.10.10 (ray environment already set up)
|
||||
- VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py
|
||||
- TP_SIZE=2 PP_SIZE=2 EAGER_MODE=1 CHUNKED_PREFILL=0 pytest -v -s distributed/test_pipeline_parallel.py
|
||||
- # the following commands are for the second node, with ip 192.168.10.11 (ray environment already set up)
|
||||
- VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py
|
||||
|
||||
- label: Distributed Tests (2 GPUs)
|
||||
mirror_hardwares: [amd]
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
num_gpus: 2
|
||||
commands:
|
||||
- bash ../.buildkite/download-images.sh
|
||||
- VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_chunked_prefill_distributed.py
|
||||
- TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_chunked_prefill_distributed.py
|
||||
- TEST_DIST_MODEL=llava-hf/llava-1.5-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_multimodal_broadcast.py
|
||||
- TEST_DIST_MODEL=microsoft/Phi-3-vision-128k-instruct DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_multimodal_broadcast.py
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_chunked_prefill_distributed.py
|
||||
- TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_chunked_prefill_distributed.py
|
||||
- TEST_DIST_MODEL=llava-hf/llava-1.5-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_multimodal_broadcast.py
|
||||
- TEST_DIST_MODEL=microsoft/Phi-3-vision-128k-instruct DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_multimodal_broadcast.py
|
||||
- pytest -v -s spec_decode/e2e/test_integration_dist_tp2.py
|
||||
- CUDA_VISIBLE_DEVICES=0,1 pytest -v -s test_sharded_state_loader.py
|
||||
- CUDA_VISIBLE_DEVICES=0,1 pytest -v -s distributed/test_utils.py
|
||||
|
||||
- label: Distributed Tests (4 GPUs)
|
||||
#mirror_hardwares: [amd]
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
num_gpus: 4
|
||||
fast_check: true
|
||||
commands:
|
||||
- pytest -v -s distributed/test_pynccl.py
|
||||
# We want to test that models which use 2 GPUs work with 4 GPUs, which is why we duplicate them here.
|
||||
# See https://github.com/vllm-project/vllm/pull/5473#issuecomment-2166601837 for context.
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- pytest -v -s spec_decode/e2e/test_integration_dist_tp4.py
|
||||
|
||||
- label: Pipeline Parallelism Test
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
num_gpus: 4
|
||||
commands:
|
||||
- TP_SIZE=2 PP_SIZE=2 EAGER_MODE=1 CHUNKED_PREFILL=1 pytest -v -s distributed/test_pipeline_parallel.py
|
||||
- TP_SIZE=2 PP_SIZE=2 EAGER_MODE=1 CHUNKED_PREFILL=0 pytest -v -s distributed/test_pipeline_parallel.py
|
||||
- TP_SIZE=1 PP_SIZE=3 EAGER_MODE=1 CHUNKED_PREFILL=0 pytest -v -s distributed/test_pipeline_parallel.py
|
||||
- PP_SIZE=4 EAGER_MODE=1 CHUNKED_PREFILL=1 pytest -v -s distributed/test_pipeline_parallel.py
|
||||
- PP_SIZE=4 EAGER_MODE=1 CHUNKED_PREFILL=0 pytest -v -s distributed/test_pipeline_parallel.py
|
||||
|
||||
|
||||
- label: Engine Test
|
||||
mirror_hardwares: [amd]
|
||||
commands:
|
||||
- pytest -v -s engine test_sequence.py test_config.py test_logger.py
|
||||
# OOM in the CI unless we run this separately
|
||||
- pytest -v -s tokenization
|
||||
|
||||
- label: Entrypoints Test
|
||||
fast_check: true
|
||||
mirror_hardwares: [amd]
|
||||
|
||||
commands:
|
||||
- pytest -v -s entrypoints/llm
|
||||
- pytest -v -s entrypoints/openai
|
||||
|
||||
- label: Examples Test
|
||||
working_dir: "/vllm-workspace/examples"
|
||||
mirror_hardwares: [amd]
|
||||
commands:
|
||||
# install aws cli for llava_example.py
|
||||
# install tensorizer for tensorize_vllm_model.py
|
||||
- pip install awscli tensorizer
|
||||
- python3 offline_inference.py
|
||||
- python3 offline_inference_with_prefix.py
|
||||
- python3 llm_engine_example.py
|
||||
- python3 llava_example.py
|
||||
- python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors
|
||||
|
||||
- label: Inputs Test
|
||||
#mirror_hardwares: [amd]
|
||||
commands:
|
||||
- bash ../.buildkite/download-images.sh
|
||||
- pytest -v -s test_inputs.py
|
||||
- pytest -v -s multimodal
|
||||
|
||||
- label: Kernels Test %N
|
||||
#mirror_hardwares: [amd]
|
||||
commands:
|
||||
- pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.0.8/flashinfer-0.0.8+cu121torch2.3-cp310-cp310-linux_x86_64.whl
|
||||
- pytest -v -s kernels --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
|
||||
parallelism: 4
|
||||
|
||||
- label: Models Test
|
||||
#mirror_hardwares: [amd]
|
||||
commands:
|
||||
- pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.0.8/flashinfer-0.0.8+cu121torch2.3-cp310-cp310-linux_x86_64.whl
|
||||
- pytest -v -s models -m \"not vlm\"
|
||||
|
||||
- label: Vision Language Models Test
|
||||
mirror_hardwares: [amd]
|
||||
commands:
|
||||
- bash ../.buildkite/download-images.sh
|
||||
- pytest -v -s models -m vlm
|
||||
|
||||
- label: Prefix Caching Test
|
||||
mirror_hardwares: [amd]
|
||||
commands:
|
||||
- pytest -v -s prefix_caching
|
||||
|
||||
- label: Samplers Test
|
||||
#mirror_hardwares: [amd]
|
||||
command: pytest -v -s samplers
|
||||
|
||||
- label: LogitsProcessor Test
|
||||
mirror_hardwares: [amd]
|
||||
command: pytest -v -s test_logits_processor.py
|
||||
|
||||
- label: Utils Test
|
||||
commands:
|
||||
- pytest -v -s test_utils.py
|
||||
- pytest -v -s test_embedded_commit.py
|
||||
|
||||
- label: Worker Test
|
||||
mirror_hardwares: [amd]
|
||||
command: pytest -v -s worker
|
||||
|
||||
- label: Speculative decoding tests
|
||||
#mirror_hardwares: [amd]
|
||||
commands:
|
||||
# See https://github.com/vllm-project/vllm/issues/5152
|
||||
- export VLLM_ATTENTION_BACKEND=XFORMERS
|
||||
- pytest -v -s spec_decode
|
||||
|
||||
- label: LoRA Test %N
|
||||
#mirror_hardwares: [amd]
|
||||
command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py
|
||||
parallelism: 4
|
||||
|
||||
- label: LoRA Long Context (Distributed)
|
||||
#mirror_hardwares: [amd]
|
||||
num_gpus: 4
|
||||
# This test runs llama 13B, so it is required to run on 4 GPUs.
|
||||
commands:
|
||||
# FIXIT: find out which code initialize cuda before running the test
|
||||
# before the fix, we need to use spawn to test it
|
||||
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
- pytest -v -s -x lora/test_long_context.py
|
||||
|
||||
- label: Tensorizer Test
|
||||
#mirror_hardwares: [amd]
|
||||
commands:
|
||||
- apt-get install curl libsodium23
|
||||
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
- pytest -v -s tensorizer_loader
|
||||
|
||||
- label: Metrics Test
|
||||
mirror_hardwares: [amd]
|
||||
command: pytest -v -s metrics
|
||||
|
||||
- label: Quantization Test
|
||||
#mirror_hardwares: [amd]
|
||||
command: pytest -v -s quantization
|
||||
|
||||
- label: Tracing Test
|
||||
commands:
|
||||
- "pip install \
|
||||
opentelemetry-sdk \
|
||||
opentelemetry-api \
|
||||
opentelemetry-exporter-otlp \
|
||||
opentelemetry-semantic-conventions-ai"
|
||||
- pytest -v -s tracing
|
||||
|
||||
- label: Benchmarks
|
||||
working_dir: "/vllm-workspace/.buildkite"
|
||||
mirror_hardwares: [amd]
|
||||
commands:
|
||||
- pip install aiohttp
|
||||
- bash run-benchmarks.sh
|
||||
|
||||
- label: LM Eval Small Models
|
||||
working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
|
||||
commands:
|
||||
- pip install lm-eval
|
||||
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
- bash ./run-tests.sh -c configs/models-small.txt -t 1
|
||||
|
||||
- label: LM Eval Large Models
|
||||
gpu: a100
|
||||
num_gpus: 4
|
||||
working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
|
||||
commands:
|
||||
- pip install lm-eval
|
||||
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
- bash ./run-tests.sh -c configs/models-large.txt -t 4
|
||||
|
||||
- label: Documentation Build
|
||||
working_dir: "/vllm-workspace/test_docs/docs"
|
||||
fast_check: true
|
||||
no_gpu: True
|
||||
commands:
|
||||
- pip install -r requirements-docs.txt
|
||||
- SPHINXOPTS=\"-W\" make html
|
||||
|
||||
- label: Distributed Tests (A100)
|
||||
gpu: a100
|
||||
num_gpus: 4
|
||||
commands:
|
||||
# NOTE: don't test llama model here, it seems hf implementation is buggy
|
||||
# see https://github.com/vllm-project/vllm/pull/5689 for details
|
||||
- pytest -v -s distributed/test_custom_all_reduce.py
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.0.8/flashinfer-0.0.8+cu121torch2.3-cp310-cp310-linux_x86_64.whl
|
||||
- VLLM_ATTENTION_BACKEND=FLASHINFER TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- VLLM_ATTENTION_BACKEND=FLASHINFER TEST_DIST_MODEL=meta-llama/Meta-Llama-3-8B DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
|
||||
- pytest -v -s -x lora/test_mixtral.py
|
26
.clang-format
Normal file
26
.clang-format
Normal file
@ -0,0 +1,26 @@
|
||||
BasedOnStyle: Google
|
||||
UseTab: Never
|
||||
IndentWidth: 2
|
||||
ColumnLimit: 80
|
||||
|
||||
# Force pointers to the type for C++.
|
||||
DerivePointerAlignment: false
|
||||
PointerAlignment: Left
|
||||
|
||||
# Reordering #include statements can (and currently will) introduce errors
|
||||
SortIncludes: false
|
||||
|
||||
# Style choices
|
||||
AlignConsecutiveAssignments: false
|
||||
AlignConsecutiveDeclarations: false
|
||||
IndentPPDirectives: BeforeHash
|
||||
|
||||
IncludeCategories:
|
||||
- Regex: '^<'
|
||||
Priority: 4
|
||||
- Regex: '^"(llvm|llvm-c|clang|clang-c|mlir|mlir-c)/'
|
||||
Priority: 3
|
||||
- Regex: '^"(qoda|\.\.)/'
|
||||
Priority: 2
|
||||
- Regex: '.*'
|
||||
Priority: 1
|
1
.dockerignore
Normal file
1
.dockerignore
Normal file
@ -0,0 +1 @@
|
||||
vllm/*.so
|
2
.github/FUNDING.yml
vendored
Normal file
2
.github/FUNDING.yml
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
github: [vllm-project]
|
||||
open_collective: [vllm]
|
22
.github/ISSUE_TEMPLATE/100-documentation.yml
vendored
Normal file
22
.github/ISSUE_TEMPLATE/100-documentation.yml
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
name: 📚 Documentation
|
||||
description: Report an issue related to https://docs.vllm.ai/
|
||||
title: "[Doc]: "
|
||||
labels: ["documentation"]
|
||||
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: 📚 The doc issue
|
||||
description: >
|
||||
A clear and concise description of what content in https://docs.vllm.ai/ is an issue.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Suggest a potential alternative/fix
|
||||
description: >
|
||||
Tell us how we could improve the documentation in this regard.
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
40
.github/ISSUE_TEMPLATE/200-installation.yml
vendored
Normal file
40
.github/ISSUE_TEMPLATE/200-installation.yml
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
name: 🛠️ Installation
|
||||
description: Report an issue here when you hit errors during installation.
|
||||
title: "[Installation]: "
|
||||
labels: ["installation"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting an issue, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Your current environment
|
||||
description: |
|
||||
Please run the following and paste the output below.
|
||||
```sh
|
||||
wget https://raw.githubusercontent.com/vllm-project/vllm/main/collect_env.py
|
||||
# For security purposes, please feel free to check the contents of collect_env.py before running it.
|
||||
python collect_env.py
|
||||
```
|
||||
It is suggested to download and execute the latest script, as vllm might frequently update the diagnosis information needed for accurately and quickly responding to issues.
|
||||
value: |
|
||||
```text
|
||||
The output of `python collect_env.py`
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: How you are installing vllm
|
||||
description: |
|
||||
Paste the full command you are trying to execute.
|
||||
value: |
|
||||
```sh
|
||||
pip install -vvv vllm
|
||||
```
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
38
.github/ISSUE_TEMPLATE/300-usage.yml
vendored
Normal file
38
.github/ISSUE_TEMPLATE/300-usage.yml
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
name: 💻 Usage
|
||||
description: Raise an issue here if you don't know how to use vllm.
|
||||
title: "[Usage]: "
|
||||
labels: ["usage"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting an issue, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Your current environment
|
||||
description: |
|
||||
Please run the following and paste the output below.
|
||||
```sh
|
||||
wget https://raw.githubusercontent.com/vllm-project/vllm/main/collect_env.py
|
||||
# For security purposes, please feel free to check the contents of collect_env.py before running it.
|
||||
python collect_env.py
|
||||
```
|
||||
It is suggested to download and execute the latest script, as vllm might frequently update the diagnosis information needed for accurately and quickly responding to issues.
|
||||
value: |
|
||||
```text
|
||||
The output of `python collect_env.py`
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: How would you like to use vllm
|
||||
description: |
|
||||
A detailed description of how you want to use vllm.
|
||||
value: |
|
||||
I want to run inference of a [specific model](put link here). I don't know how to integrate it with vllm.
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
.github/ISSUE_TEMPLATE/400-bug report.yml (86 lines, vendored, new file)
@@ -0,0 +1,86 @@
|
||||
name: 🐛 Bug report
|
||||
description: Raise an issue here if you find a bug.
|
||||
title: "[Bug]: "
|
||||
labels: ["bug"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting an issue, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Your current environment
|
||||
description: |
|
||||
Please run the following and paste the output below.
|
||||
```sh
|
||||
wget https://raw.githubusercontent.com/vllm-project/vllm/main/collect_env.py
|
||||
# For security purposes, please feel free to check the contents of collect_env.py before running it.
|
||||
python collect_env.py
|
||||
```
|
||||
It is suggested to download and run the latest version of the script, as vllm may frequently update the diagnostic information needed to respond to issues accurately and quickly.
|
||||
value: |
|
||||
```text
|
||||
The output of `python collect_env.py`
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: 🐛 Describe the bug
|
||||
description: |
|
||||
Please provide a clear and concise description of what the bug is.
|
||||
|
||||
If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently. We are going to copy-paste your code and we expect to get the same result as you did: avoid any external data, and include the relevant imports, etc. For example:
|
||||
|
||||
```python
|
||||
from vllm import LLM, SamplingParams
|
||||
|
||||
prompts = [
|
||||
"Hello, my name is",
|
||||
"The president of the United States is",
|
||||
"The capital of France is",
|
||||
"The future of AI is",
|
||||
]
|
||||
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
|
||||
|
||||
llm = LLM(model="facebook/opt-125m")
|
||||
|
||||
outputs = llm.generate(prompts, sampling_params)
|
||||
|
||||
# Print the outputs.
|
||||
for output in outputs:
|
||||
prompt = output.prompt
|
||||
generated_text = output.outputs[0].text
|
||||
print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
|
||||
```
|
||||
|
||||
If the code is too long (hopefully, it isn't), feel free to put it in a public gist and link it in the issue: https://gist.github.com.
|
||||
|
||||
Please also paste or describe the results you observe instead of the expected results. If you observe an error, please paste the error message including the **full** traceback of the exception, and wrap it in a code block using ```` ``` ```` (triple backticks).
|
||||
|
||||
Please set the environment variable `export VLLM_LOGGING_LEVEL=DEBUG` to turn on more logging to help debug potential issues.
|
||||
|
||||
If you experience crashes or hangs, it would be helpful to run vllm with `export VLLM_TRACE_FUNCTION=1`. All function calls in vllm will be recorded. Inspect these log files and tell us which function crashes or hangs.
|
||||
placeholder: |
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
```python
|
||||
# Sample code to reproduce the problem
|
||||
```
|
||||
|
||||
```
|
||||
The error message you got, with the full traceback.
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
⚠️ Please separate bugs in the `transformers` implementation or usage from bugs in `vllm`. If you think anything is wrong with the models' output:
|
||||
|
||||
- Try the counterpart of `transformers` first. If the error appears, please go to [their issues](https://github.com/huggingface/transformers/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc).
|
||||
|
||||
- If the error only appears in vllm, please provide the detailed script you use to run both `transformers` and `vllm`, highlight the difference, and describe what you expect.
|
||||
|
||||
Thanks for contributing 🎉!
|
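As a quick illustration of the debugging knobs this template mentions, a minimal sketch is shown below; the model name and prompt are placeholder values borrowed from the template's own example, so substitute your actual reproduction case.

```bash
# Sketch: enable verbose logging and function tracing before reproducing a bug.
# The model and prompt are placeholders from the template above, not a fixed recipe.
export VLLM_LOGGING_LEVEL=DEBUG      # more detailed logs
export VLLM_TRACE_FUNCTION=1         # record every vLLM function call (slow; useful for crashes/hangs)
python -c "
from vllm import LLM, SamplingParams
llm = LLM(model='facebook/opt-125m')
print(llm.generate(['Hello, my name is'], SamplingParams(temperature=0.8, top_p=0.95)))
"
```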
.github/ISSUE_TEMPLATE/500-feature request.yml (31 lines, vendored, new file)
@@ -0,0 +1,31 @@
|
||||
name: 🚀 Feature request
|
||||
description: Submit a proposal/request for a new vllm feature
|
||||
title: "[Feature]: "
|
||||
labels: ["feature request"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting an issue, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: 🚀 The feature, motivation and pitch
|
||||
description: >
|
||||
A clear and concise description of the feature proposal. Please outline the motivation for the proposal. Is your feature request related to a specific problem? e.g., *"I'm working on X and would like Y to be possible"*. If this is related to another GitHub issue, please link here too.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Alternatives
|
||||
description: >
|
||||
A description of any alternative solutions or features you've considered, if any.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: >
|
||||
Add any other context or screenshots about the feature request.
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
.github/ISSUE_TEMPLATE/600-new model.yml (33 lines, vendored, new file)
@@ -0,0 +1,33 @@
|
||||
name: 🤗 Support request for a new model from huggingface
|
||||
description: Submit a proposal/request for a new model from huggingface
|
||||
title: "[New Model]: "
|
||||
labels: ["new model"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting an issue, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
|
||||
#### We also highly recommend you read https://docs.vllm.ai/en/latest/models/adding_model.html first to understand how to add a new model.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: The model to consider.
|
||||
description: >
|
||||
A huggingface url, pointing to the model, e.g. https://huggingface.co/openai-community/gpt2 .
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: The closest model vllm already supports.
|
||||
description: >
|
||||
Here is the list of models already supported by vllm: https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models . Which model is the most similar to the model you want to add support for?
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: What is the difficulty of supporting the model you want?
|
||||
description: >
|
||||
For example, any new operators or new architecture?
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
.github/ISSUE_TEMPLATE/700-performance discussion.yml (52 lines, vendored, new file)
@@ -0,0 +1,52 @@
|
||||
name: ⚡ Discussion on the performance of vllm
|
||||
description: Submit a proposal/discussion about the performance of vllm
|
||||
title: "[Performance]: "
|
||||
labels: ["performance"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting an issue, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Proposal to improve performance
|
||||
description: >
|
||||
How do you plan to improve vllm's performance?
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Report of performance regression
|
||||
description: >
|
||||
Please provide a detailed description of the performance comparison to confirm the regression. You may want to run the benchmark scripts at https://github.com/vllm-project/vllm/tree/main/benchmarks .
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Misc discussion on performance
|
||||
description: >
|
||||
Anything about the performance.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Your current environment (if you think it is necessary)
|
||||
description: |
|
||||
Please run the following and paste the output below.
|
||||
```sh
|
||||
wget https://raw.githubusercontent.com/vllm-project/vllm/main/collect_env.py
|
||||
# For security purposes, please feel free to check the contents of collect_env.py before running it.
|
||||
python collect_env.py
|
||||
```
|
||||
It is suggested to download and run the latest version of the script, as vllm may frequently update the diagnostic information needed to respond to issues accurately and quickly.
|
||||
value: |
|
||||
```text
|
||||
The output of `python collect_env.py`
|
||||
```
|
||||
validations:
|
||||
required: false
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
.github/ISSUE_TEMPLATE/750-RFC.yml (49 lines, vendored, new file)
@@ -0,0 +1,49 @@
|
||||
name: 💬 Request for comments (RFC).
|
||||
description: Ask for feedback on major architectural changes or design choices.
|
||||
title: "[RFC]: "
|
||||
labels: ["RFC"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Please take a look at previous [RFCs](https://github.com/vllm-project/vllm/issues?q=label%3ARFC+sort%3Aupdated-desc) for reference.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Motivation.
|
||||
description: >
|
||||
The motivation of the RFC.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Proposed Change.
|
||||
description: >
|
||||
The proposed change of the RFC.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Feedback Period.
|
||||
description: >
|
||||
The feedback period of the RFC. Usually at least one week.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: CC List.
|
||||
description: >
|
||||
The list of people you want to CC.
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Any Other Things.
|
||||
description: >
|
||||
Any other things you would like to mention.
|
||||
validations:
|
||||
required: false
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
.github/ISSUE_TEMPLATE/800-misc discussion.yml (21 lines, vendored, new file)
@@ -0,0 +1,21 @@
|
||||
name: 🎲 Misc/random discussions that do not fit into the above categories.
|
||||
description: Submit a discussion as you like. Note that developers are heavily overloaded and we mainly rely on community users to answer these issues.
|
||||
title: "[Misc]: "
|
||||
labels: ["misc"]
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting an issue, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/vllm-project/vllm/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Anything you want to discuss about vllm.
|
||||
description: >
|
||||
Anything you want to discuss about vllm.
|
||||
validations:
|
||||
required: true
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
.github/ISSUE_TEMPLATE/config.yml (1 line, vendored, new file)
@@ -0,0 +1 @@
blank_issues_enabled: false
.github/PULL_REQUEST_TEMPLATE.md (64 lines, vendored, new file)
@@ -0,0 +1,64 @@
|
||||
FILL IN THE PR DESCRIPTION HERE
|
||||
|
||||
FIX #xxxx (*link existing issues this PR will resolve*)
|
||||
|
||||
**BEFORE SUBMITTING, PLEASE READ THE CHECKLIST BELOW AND FILL IN THE DESCRIPTION ABOVE**
|
||||
|
||||
---
|
||||
|
||||
<details>
|
||||
<!-- inside this <details> section, markdown rendering does not work, so we use raw html here. -->
|
||||
<summary><b> PR Checklist (Click to Expand) </b></summary>
|
||||
|
||||
<p>Thank you for your contribution to vLLM! Before submitting the pull request, please ensure the PR meets the following criteria. This helps vLLM maintain the code quality and improve the efficiency of the review process.</p>
|
||||
|
||||
<h3>PR Title and Classification</h3>
|
||||
<p>Only specific types of PRs will be reviewed. The PR title is prefixed appropriately to indicate the type of change. Please use one of the following:</p>
|
||||
<ul>
|
||||
<li><code>[Bugfix]</code> for bug fixes.</li>
|
||||
<li><code>[CI/Build]</code> for build or continuous integration improvements.</li>
|
||||
<li><code>[Doc]</code> for documentation fixes and improvements.</li>
|
||||
<li><code>[Model]</code> for adding a new model or improving an existing model. Model name should appear in the title.</li>
|
||||
<li><code>[Frontend]</code> For changes on the vLLM frontend (e.g., OpenAI API server, <code>LLM</code> class, etc.) </li>
|
||||
<li><code>[Kernel]</code> for changes affecting CUDA kernels or other compute kernels.</li>
|
||||
<li><code>[Core]</code> for changes in the core vLLM logic (e.g., <code>LLMEngine</code>, <code>AsyncLLMEngine</code>, <code>Scheduler</code>, etc.)</li>
|
||||
<li><code>[Hardware][Vendor]</code> for hardware-specific changes. Vendor name should appear in the prefix (e.g., <code>[Hardware][AMD]</code>).</li>
|
||||
<li><code>[Misc]</code> for PRs that do not fit the above categories. Please use this sparingly.</li>
|
||||
</ul>
|
||||
<p><strong>Note:</strong> If the PR spans more than one category, please include all relevant prefixes.</p>
|
||||
|
||||
<h3>Code Quality</h3>
|
||||
|
||||
<p>The PR needs to meet the following code quality standards:</p>
|
||||
|
||||
<ul>
|
||||
<li>We adhere to <a href="https://google.github.io/styleguide/pyguide.html">Google Python style guide</a> and <a href="https://google.github.io/styleguide/cppguide.html">Google C++ style guide</a>.</li>
|
||||
<li>Pass all linter checks. Please use <a href="https://github.com/vllm-project/vllm/blob/main/format.sh"><code>format.sh</code></a> to format your code.</li>
|
||||
<li>The code needs to be well-documented to ensure future contributors can easily understand it.</li>
|
||||
<li>Include sufficient tests to ensure the project stays correct and robust. This includes both unit tests and integration tests.</li>
|
||||
<li>Please add documentation to <code>docs/source/</code> if the PR modifies the user-facing behaviors of vLLM. It helps vLLM users understand and utilize the new features or changes.</li>
|
||||
</ul>
|
||||
|
||||
<h3>Notes for Large Changes</h3>
|
||||
<p>Please keep the changes as concise as possible. For major architectural changes (>500 LOC excluding kernel/data/config/test), we would expect a GitHub issue (RFC) discussing the technical design and justification. Otherwise, we will tag it with <code>rfc-required</code> and might not review the PR.</p>
|
||||
|
||||
<h3>What to Expect for the Reviews</h3>
|
||||
|
||||
<p>The goal of the vLLM team is to be a <i>transparent reviewing machine</i>. We would like to make the review process transparent and efficient and make sure no contributor feels confused or frustrated. However, the vLLM team is small, so we need to prioritize some PRs over others. Here is what you can expect from the review process:</p>
|
||||
|
||||
<ul>
|
||||
<li> After the PR is submitted, the PR will be assigned to a reviewer. Every reviewer will pick up the PRs based on their expertise and availability.</li>
|
||||
<li> After the PR is assigned, the reviewer will provide a status update every 2-3 days. If the PR is not reviewed within 7 days, please feel free to ping the reviewer or the vLLM team.</li>
|
||||
<li> After the review, the reviewer will put an <code> action-required</code> label on the PR if there are changes required. The contributor should address the comments and ping the reviewer to re-review the PR.</li>
|
||||
<li> Please respond to all comments within a reasonable time frame. If a comment isn't clear or you disagree with a suggestion, feel free to ask for clarification or discuss the suggestion.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<h3>Thank You</h3>
|
||||
|
||||
<p> Finally, thank you for taking the time to read these guidelines and for your interest in contributing to vLLM. Your contributions make vLLM a great tool for everyone! </p>
|
||||
|
||||
|
||||
</details>
|
||||
|
||||
|
.github/workflows/add_label_automerge.yml (21 lines, vendored, new file)
@@ -0,0 +1,21 @@
|
||||
name: Add label on auto-merge enabled
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- auto_merge_enabled
|
||||
jobs:
|
||||
add-label-on-auto-merge:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Add label
|
||||
uses: actions/github-script@v5
|
||||
with:
|
||||
script: |
|
||||
github.rest.issues.addLabels({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
labels: ['ready']
|
||||
})
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
.github/workflows/add_label_ready_comment.yml (23 lines, vendored, new file)
@@ -0,0 +1,23 @@
|
||||
name: Add Ready Label on Ready Comment
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
add-ready-label:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event.issue.pull_request && contains(github.event.comment.body, '/ready')
|
||||
steps:
|
||||
- name: Add label
|
||||
uses: actions/github-script@v5
|
||||
with:
|
||||
script: |
|
||||
github.rest.issues.addLabels({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
labels: ['ready']
|
||||
})
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
.github/workflows/clang-format.yml (42 lines, vendored, new file)
@@ -0,0 +1,42 @@
|
||||
name: clang-format
|
||||
|
||||
on:
|
||||
# Trigger the workflow on push or pull request,
|
||||
# but only for the main branch
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
clang-format:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.11"]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install clang-format==18.1.5
|
||||
- name: Running clang-format
|
||||
run: |
|
||||
EXCLUDES=(
|
||||
'csrc/moe/topk_softmax_kernels.cu'
|
||||
'csrc/punica/bgmv/bgmv_bf16_bf16_bf16.cu'
|
||||
'csrc/punica/bgmv/bgmv_config.h'
|
||||
'csrc/punica/bgmv/bgmv_impl.cuh'
|
||||
'csrc/punica/bgmv/vec_dtypes.cuh'
|
||||
'csrc/punica/punica_ops.cu'
|
||||
'csrc/punica/type_convert.h'
|
||||
)
|
||||
find csrc/ \( -name '*.h' -o -name '*.cpp' -o -name '*.cu' -o -name '*.cuh' \) -print \
|
||||
| grep -vFf <(printf "%s\n" "${EXCLUDES[@]}") \
|
||||
| xargs clang-format --dry-run --Werror
|
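For reference, the check above can be reproduced locally before pushing; a minimal sketch, assuming the same clang-format version and exclusion list as the workflow and that you run it from the repository root, is:

```bash
# Sketch: run the same exclusion-aware clang-format check the workflow performs.
pip install clang-format==18.1.5
find csrc/ \( -name '*.h' -o -name '*.cpp' -o -name '*.cu' -o -name '*.cuh' \) -print \
    | grep -vFf <(printf "%s\n" \
        'csrc/moe/topk_softmax_kernels.cu' \
        'csrc/punica/bgmv/bgmv_bf16_bf16_bf16.cu' \
        'csrc/punica/bgmv/bgmv_config.h' \
        'csrc/punica/bgmv/bgmv_impl.cuh' \
        'csrc/punica/bgmv/vec_dtypes.cuh' \
        'csrc/punica/punica_ops.cu' \
        'csrc/punica/type_convert.h') \
    | xargs clang-format --dry-run --Werror
```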
.github/workflows/mypy.yaml (53 lines, vendored, new file)
@@ -0,0 +1,53 @@
|
||||
name: mypy
|
||||
|
||||
on:
|
||||
# Trigger the workflow on push or pull request,
|
||||
# but only for the main branch
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
mypy:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install mypy==1.9.0
|
||||
pip install types-setuptools
|
||||
pip install types-PyYAML
|
||||
pip install types-requests
|
||||
- name: Mypy
|
||||
run: |
|
||||
mypy tests --config-file pyproject.toml
|
||||
mypy vllm/*.py --config-file pyproject.toml
|
||||
mypy vllm/attention --config-file pyproject.toml
|
||||
mypy vllm/core --config-file pyproject.toml
|
||||
mypy vllm/distributed --config-file pyproject.toml
|
||||
mypy vllm/engine --config-file pyproject.toml
|
||||
mypy vllm/entrypoints --config-file pyproject.toml
|
||||
mypy vllm/executor --config-file pyproject.toml
|
||||
mypy vllm/inputs --config-file pyproject.toml
|
||||
mypy vllm/logging --config-file pyproject.toml
|
||||
mypy vllm/lora --config-file pyproject.toml
|
||||
mypy vllm/model_executor --config-file pyproject.toml
|
||||
mypy vllm/multimodal --config-file pyproject.toml
|
||||
mypy vllm/platforms --config-file pyproject.toml
|
||||
mypy vllm/spec_decode --config-file pyproject.toml
|
||||
mypy vllm/transformers_utils --config-file pyproject.toml
|
||||
mypy vllm/usage --config-file pyproject.toml
|
||||
mypy vllm/worker --config-file pyproject.toml
|
||||
|
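The same checks can be run locally; a minimal sketch, assuming `mypy==1.9.0` and the type stubs listed above are installed, is shown here (only a few of the per-package invocations are repeated):

```bash
# Sketch: run a subset of the workflow's mypy checks locally.
pip install mypy==1.9.0 types-setuptools types-PyYAML types-requests
mypy tests --config-file pyproject.toml
mypy vllm/*.py --config-file pyproject.toml
mypy vllm/engine --config-file pyproject.toml   # repeat for the other vllm/ subpackages as needed
```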
.github/workflows/publish.yml (110 lines, vendored, new file)
@@ -0,0 +1,110 @@
|
||||
# This workflow will upload a Python Package to Release asset
|
||||
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions
|
||||
|
||||
name: Create Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- v*
|
||||
|
||||
# Needed to create release and upload assets
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
release:
|
||||
# Retrieve tag and create release
|
||||
name: Create Release
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Extract branch info
|
||||
shell: bash
|
||||
run: |
|
||||
echo "release_tag=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
|
||||
|
||||
- name: Create Release
|
||||
id: create_release
|
||||
uses: "actions/github-script@v6"
|
||||
env:
|
||||
RELEASE_TAG: ${{ env.release_tag }}
|
||||
with:
|
||||
github-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
script: |
|
||||
const script = require('.github/workflows/scripts/create_release.js')
|
||||
await script(github, context, core)
|
||||
|
||||
wheel:
|
||||
name: Build Wheel
|
||||
runs-on: ${{ matrix.os }}
|
||||
needs: release
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: ['ubuntu-20.04']
|
||||
python-version: ['3.8', '3.9', '3.10', '3.11']
|
||||
pytorch-version: ['2.3.1'] # Must be the most recent version that meets requirements-cuda.txt.
|
||||
cuda-version: ['11.8', '12.1']
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup ccache
|
||||
uses: hendrikmuhs/ccache-action@v1.2
|
||||
with:
|
||||
create-symlink: true
|
||||
key: ${{ github.job }}-${{ matrix.python-version }}-${{ matrix.cuda-version }}
|
||||
|
||||
- name: Set up Linux Env
|
||||
if: ${{ runner.os == 'Linux' }}
|
||||
run: |
|
||||
bash -x .github/workflows/scripts/env.sh
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install CUDA ${{ matrix.cuda-version }}
|
||||
run: |
|
||||
bash -x .github/workflows/scripts/cuda-install.sh ${{ matrix.cuda-version }} ${{ matrix.os }}
|
||||
|
||||
- name: Install PyTorch ${{ matrix.pytorch-version }} with CUDA ${{ matrix.cuda-version }}
|
||||
run: |
|
||||
bash -x .github/workflows/scripts/pytorch-install.sh ${{ matrix.python-version }} ${{ matrix.pytorch-version }} ${{ matrix.cuda-version }}
|
||||
|
||||
- name: Build wheel
|
||||
shell: bash
|
||||
env:
|
||||
CMAKE_BUILD_TYPE: Release # do not compile with debug symbols to reduce wheel size
|
||||
run: |
|
||||
bash -x .github/workflows/scripts/build.sh ${{ matrix.python-version }} ${{ matrix.cuda-version }}
|
||||
wheel_name=$(ls dist/*whl | xargs -n 1 basename)
|
||||
asset_name=${wheel_name//"linux"/"manylinux1"}
|
||||
echo "wheel_name=${wheel_name}" >> $GITHUB_ENV
|
||||
echo "asset_name=${asset_name}" >> $GITHUB_ENV
|
||||
|
||||
- name: Upload Release Asset
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ needs.release.outputs.upload_url }}
|
||||
asset_path: ./dist/${{ env.wheel_name }}
|
||||
asset_name: ${{ env.asset_name }}
|
||||
asset_content_type: application/*
|
||||
|
||||
# (Danielkinz): This last step will publish the .whl to pypi. Warning: untested
|
||||
# - name: Publish package
|
||||
# uses: pypa/gh-action-pypi-publish@release/v1.8
|
||||
# with:
|
||||
# repository-url: https://test.pypi.org/legacy/
|
||||
# password: ${{ secrets.PYPI_API_TOKEN }}
|
||||
# skip-existing: true
|
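For orientation, the wheel job above roughly corresponds to the following local sequence; this is a sketch, and the Python/CUDA/PyTorch versions are example values taken from the workflow's matrix rather than required choices.

```bash
# Sketch: mirror one matrix entry of the release wheel build locally.
bash -x .github/workflows/scripts/env.sh                        # install common build tools (uses sudo)
bash -x .github/workflows/scripts/cuda-install.sh 12.1 ubuntu-20.04
bash -x .github/workflows/scripts/pytorch-install.sh 3.10 2.3.1 12.1
CMAKE_BUILD_TYPE=Release bash -x .github/workflows/scripts/build.sh 3.10 12.1
ls dist/*.whl                                                   # resulting wheel(s)
```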
.github/workflows/reminder_comment.yml (21 lines, vendored, new file)
@@ -0,0 +1,21 @@
|
||||
name: PR Reminder Comment Bot
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened]
|
||||
|
||||
jobs:
|
||||
pr_reminder:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Remind to run full CI on PR
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
script: |
|
||||
github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: '👋 Hi! Thank you for contributing to the vLLM project.\n Just a reminder: PRs do not trigger a full CI run by default. Instead, they only trigger the `fastcheck` CI, which consists of only a small and essential subset of tests to quickly catch errors, with the flexibility to run extra individual tests on top (you can do this by unblocking test steps in the Buildkite run). \n\nA full CI run is still required to merge this PR, so once the PR is ready to go, please make sure to run it. If you need all test signals in between PR commits, you can trigger a full CI run as well.\n\n To run full CI, you can do one of these:\n- Comment `/ready` on the PR\n- Add the `ready` label to the PR\n- Enable auto-merge.\n\n🚀'
|
||||
})
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
.github/workflows/ruff.yml (37 lines, vendored, new file)
@@ -0,0 +1,37 @@
|
||||
name: ruff
|
||||
|
||||
on:
|
||||
# Trigger the workflow on push or pull request,
|
||||
# but only for the main branch
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
ruff:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install ruff==0.1.5 codespell==2.3.0 tomli==2.0.1 isort==5.13.2
|
||||
- name: Analysing the code with ruff
|
||||
run: |
|
||||
ruff .
|
||||
- name: Spelling check with codespell
|
||||
run: |
|
||||
codespell --toml pyproject.toml
|
||||
- name: Run isort
|
||||
run: |
|
||||
isort . --check-only
|
.github/workflows/scripts/build.sh (21 lines, vendored, new file)
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
|
||||
python_executable=python$1
|
||||
cuda_home=/usr/local/cuda-$2
|
||||
|
||||
# Update paths
|
||||
PATH=${cuda_home}/bin:$PATH
|
||||
LD_LIBRARY_PATH=${cuda_home}/lib64:$LD_LIBRARY_PATH
|
||||
|
||||
# Install requirements
|
||||
$python_executable -m pip install wheel packaging
|
||||
$python_executable -m pip install -r requirements-cuda.txt
|
||||
|
||||
# Limit the number of parallel jobs to avoid OOM
|
||||
export MAX_JOBS=1
|
||||
# Make sure punica is built for the release (for LoRA)
|
||||
export VLLM_INSTALL_PUNICA_KERNELS=1
|
||||
# Make sure release wheels are built for the following architectures
|
||||
export TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.9 9.0+PTX"
|
||||
# Build
|
||||
$python_executable setup.py bdist_wheel --dist-dir=dist
|
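Example invocation, as a sketch: the script takes the Python version and CUDA version as positional arguments, matching how publish.yml calls it; the specific versions below are illustrative.

```bash
# Sketch: build a release wheel for Python 3.10 against CUDA 12.1.
bash -x .github/workflows/scripts/build.sh 3.10 12.1
```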
.github/workflows/scripts/create_release.js (20 lines, vendored, new file)
@@ -0,0 +1,20 @@
|
||||
// Uses Github's API to create the release and wait for result.
|
||||
// We use a JS script since the GitHub CLI doesn't provide a way to wait for the release's creation and instead returns immediately.
|
||||
|
||||
module.exports = async (github, context, core) => {
|
||||
try {
|
||||
const response = await github.rest.repos.createRelease({
|
||||
draft: false,
|
||||
generate_release_notes: true,
|
||||
name: process.env.RELEASE_TAG,
|
||||
owner: context.repo.owner,
|
||||
prerelease: true,
|
||||
repo: context.repo.repo,
|
||||
tag_name: process.env.RELEASE_TAG,
|
||||
});
|
||||
|
||||
core.setOutput('upload_url', response.data.upload_url);
|
||||
} catch (error) {
|
||||
core.setFailed(error.message);
|
||||
}
|
||||
}
|
.github/workflows/scripts/cuda-install.sh (23 lines, vendored, new file)
@@ -0,0 +1,23 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Replace '.' with '-' ex: 11.8 -> 11-8
|
||||
cuda_version=$(echo $1 | tr "." "-")
|
||||
# Removes '-' and '.' ex: ubuntu-20.04 -> ubuntu2004
|
||||
OS=$(echo $2 | tr -d ".\-")
|
||||
|
||||
# Installs CUDA
|
||||
wget -nv https://developer.download.nvidia.com/compute/cuda/repos/${OS}/x86_64/cuda-keyring_1.1-1_all.deb
|
||||
sudo dpkg -i cuda-keyring_1.1-1_all.deb
|
||||
rm cuda-keyring_1.1-1_all.deb
|
||||
sudo apt -qq update
|
||||
sudo apt -y install cuda-${cuda_version} cuda-nvcc-${cuda_version} cuda-libraries-dev-${cuda_version}
|
||||
sudo apt clean
|
||||
|
||||
# Test nvcc
|
||||
PATH=/usr/local/cuda-$1/bin:${PATH}
|
||||
nvcc --version
|
||||
|
||||
# Log gcc, g++, c++ versions
|
||||
gcc --version
|
||||
g++ --version
|
||||
c++ --version
|
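Example invocation, as a sketch: the arguments are the CUDA version and the runner OS string, in that order, as used by publish.yml; the values below are illustrative.

```bash
# Sketch: install CUDA 12.1 on an ubuntu-20.04 runner (requires sudo).
bash -x .github/workflows/scripts/cuda-install.sh 12.1 ubuntu-20.04
```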
.github/workflows/scripts/env.sh (56 lines, vendored, new file)
@@ -0,0 +1,56 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This file installs common linux environment tools
|
||||
|
||||
export LANG=C.UTF-8
|
||||
|
||||
# python_version=$1
|
||||
|
||||
sudo apt-get update && \
|
||||
sudo apt-get install -y --no-install-recommends \
|
||||
software-properties-common \
|
||||
|
||||
sudo apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
apt-utils \
|
||||
ca-certificates \
|
||||
wget \
|
||||
git \
|
||||
vim \
|
||||
libssl-dev \
|
||||
curl \
|
||||
unzip \
|
||||
unrar \
|
||||
cmake \
|
||||
net-tools \
|
||||
sudo \
|
||||
autotools-dev \
|
||||
rsync \
|
||||
jq \
|
||||
openssh-server \
|
||||
tmux \
|
||||
screen \
|
||||
htop \
|
||||
pdsh \
|
||||
openssh-client \
|
||||
lshw \
|
||||
dmidecode \
|
||||
util-linux \
|
||||
automake \
|
||||
autoconf \
|
||||
libtool \
|
||||
net-tools \
|
||||
pciutils \
|
||||
libpci-dev \
|
||||
libaio-dev \
|
||||
libcap2 \
|
||||
libtinfo5 \
|
||||
fakeroot \
|
||||
devscripts \
|
||||
debhelper \
|
||||
nfs-common
|
||||
|
||||
# Remove github bloat files to free up disk space
|
||||
sudo rm -rf "/usr/local/share/boost"
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
sudo rm -rf "/usr/share/dotnet"
|
.github/workflows/scripts/pytorch-install.sh (15 lines, vendored, new file)
@@ -0,0 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
python_executable=python$1
|
||||
pytorch_version=$2
|
||||
cuda_version=$3
|
||||
|
||||
# Install torch
|
||||
$python_executable -m pip install numpy pyyaml scipy ipython mkl mkl-include ninja cython typing pandas typing-extensions dataclasses setuptools && conda clean -ya
|
||||
$python_executable -m pip install torch==${pytorch_version}+cu${cuda_version//./} --extra-index-url https://download.pytorch.org/whl/cu${cuda_version//./}
|
||||
|
||||
# Print version information
|
||||
$python_executable --version
|
||||
$python_executable -c "import torch; print('PyTorch:', torch.__version__)"
|
||||
$python_executable -c "import torch; print('CUDA:', torch.version.cuda)"
|
||||
$python_executable -c "from torch.utils import cpp_extension; print (cpp_extension.CUDA_HOME)"
|
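Example invocation, as a sketch: the arguments are the Python version, PyTorch version, and CUDA version, in that order, matching publish.yml; the values below are illustrative.

```bash
# Sketch: install PyTorch 2.3.1 built for CUDA 12.1 under Python 3.10.
bash -x .github/workflows/scripts/pytorch-install.sh 3.10 2.3.1 12.1
```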
.github/workflows/yapf.yml (31 lines, vendored, new file)
@@ -0,0 +1,31 @@
|
||||
name: yapf
|
||||
|
||||
on:
|
||||
# Trigger the workflow on push or pull request,
|
||||
# but only for the main branch
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
jobs:
|
||||
yapf:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install yapf==0.32.0
|
||||
pip install toml==0.10.2
|
||||
- name: Running yapf
|
||||
run: |
|
||||
yapf --diff --recursive .
|
.gitignore (198 lines, vendored, modified)
@@ -1,10 +1,192 @@
|
||||
**/*.pyc
|
||||
**/__pycache__/
|
||||
*.egg-info/
|
||||
*.eggs/
|
||||
*.so
|
||||
build/
|
||||
# vllm commit id, generated by setup.py
|
||||
vllm/commit_id.py
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
docs/source/getting_started/examples/*.rst
|
||||
!**/*.template.rst
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
.idea/
|
||||
|
||||
# VSCode
|
||||
.vscode/
|
||||
|
||||
# DS Store
|
||||
.DS_Store
|
||||
|
||||
# Results
|
||||
*.csv
|
||||
|
||||
# Python pickle files
|
||||
*.pkl
|
||||
*.png
|
||||
**/log.txt
|
||||
|
||||
# Sphinx documentation
|
||||
_build/
|
||||
|
||||
# vim swap files
|
||||
*.swo
|
||||
*.swp
|
||||
|
||||
# hip files generated by PyTorch
|
||||
*.hip
|
||||
*_hip*
|
||||
hip_compat.h
|
||||
|
||||
# Benchmark dataset
|
||||
*.json
|
||||
|
.readthedocs.yaml (21 lines, new file)
@@ -0,0 +1,21 @@
|
||||
# Read the Docs configuration file
|
||||
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||
|
||||
version: 2
|
||||
|
||||
build:
|
||||
os: ubuntu-22.04
|
||||
tools:
|
||||
python: "3.8"
|
||||
|
||||
sphinx:
|
||||
configuration: docs/source/conf.py
|
||||
|
||||
# If using Sphinx, optionally build your docs in additional formats such as PDF
|
||||
formats:
|
||||
- pdf
|
||||
|
||||
# Optionally declare the Python requirements required to build your docs
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements-docs.txt
|
.yapfignore (1 line, new file)
@@ -0,0 +1 @@
collect_env.py
CMakeLists.txt (309 lines, new file)
@@ -0,0 +1,309 @@
|
||||
cmake_minimum_required(VERSION 3.21)
|
||||
|
||||
project(vllm_extensions LANGUAGES CXX)
|
||||
|
||||
# CUDA by default, can be overridden by using -DVLLM_TARGET_DEVICE=... (used by setup.py)
|
||||
set(VLLM_TARGET_DEVICE "cuda" CACHE STRING "Target device backend for vLLM")
|
||||
|
||||
message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
|
||||
message(STATUS "Target device: ${VLLM_TARGET_DEVICE}")
|
||||
|
||||
include(${CMAKE_CURRENT_LIST_DIR}/cmake/utils.cmake)
|
||||
|
||||
#
|
||||
# Supported python versions. These versions will be searched in order, the
|
||||
# first match will be selected. These should be kept in sync with setup.py.
|
||||
#
|
||||
set(PYTHON_SUPPORTED_VERSIONS "3.8" "3.9" "3.10" "3.11")
|
||||
|
||||
# Supported NVIDIA architectures.
|
||||
set(CUDA_SUPPORTED_ARCHS "7.0;7.5;8.0;8.6;8.9;9.0")
|
||||
|
||||
# Supported AMD GPU architectures.
|
||||
set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100")
|
||||
|
||||
#
|
||||
# Supported/expected torch versions for CUDA/ROCm.
|
||||
#
|
||||
# Currently, having an incorrect pytorch version results in a warning
|
||||
# rather than an error.
|
||||
#
|
||||
# Note: the CUDA torch version is derived from pyproject.toml and various
|
||||
# requirements.txt files and should be kept consistent. The ROCm torch
|
||||
# versions are derived from Dockerfile.rocm
|
||||
#
|
||||
set(TORCH_SUPPORTED_VERSION_CUDA "2.3.1")
|
||||
set(TORCH_SUPPORTED_VERSION_ROCM "2.4.0")
|
||||
|
||||
#
|
||||
# Try to find python package with an executable that exactly matches
|
||||
# `VLLM_PYTHON_EXECUTABLE` and is one of the supported versions.
|
||||
#
|
||||
if (VLLM_PYTHON_EXECUTABLE)
|
||||
find_python_from_executable(${VLLM_PYTHON_EXECUTABLE} "${PYTHON_SUPPORTED_VERSIONS}")
|
||||
else()
|
||||
message(FATAL_ERROR
|
||||
"Please set VLLM_PYTHON_EXECUTABLE to the path of the desired python version"
|
||||
" before running cmake configure.")
|
||||
endif()
|
||||
|
||||
#
|
||||
# Update cmake's `CMAKE_PREFIX_PATH` with torch location.
|
||||
#
|
||||
append_cmake_prefix_path("torch" "torch.utils.cmake_prefix_path")
|
||||
|
||||
# Ensure the 'nvcc' command is in the PATH
|
||||
find_program(NVCC_EXECUTABLE nvcc)
|
||||
if (CUDA_FOUND AND NOT NVCC_EXECUTABLE)
|
||||
message(FATAL_ERROR "nvcc not found")
|
||||
endif()
|
||||
|
||||
#
|
||||
# Import torch cmake configuration.
|
||||
# Torch also imports CUDA (and partially HIP) languages with some customizations,
|
||||
# so there is no need to do this explicitly with check_language/enable_language,
|
||||
# etc.
|
||||
#
|
||||
find_package(Torch REQUIRED)
|
||||
|
||||
#
|
||||
# Forward the non-CUDA device extensions to external CMake scripts.
|
||||
#
|
||||
if (NOT VLLM_TARGET_DEVICE STREQUAL "cuda" AND
|
||||
NOT VLLM_TARGET_DEVICE STREQUAL "rocm")
|
||||
if (VLLM_TARGET_DEVICE STREQUAL "cpu")
|
||||
include(${CMAKE_CURRENT_LIST_DIR}/cmake/cpu_extension.cmake)
|
||||
else()
|
||||
message(FATAL_ERROR "Unsupported vLLM target device: ${VLLM_TARGET_DEVICE}")
|
||||
endif()
|
||||
return()
|
||||
endif()
|
||||
|
||||
#
|
||||
# Set up GPU language and check the torch version and warn if it isn't
|
||||
# what is expected.
|
||||
#
|
||||
if (NOT HIP_FOUND AND CUDA_FOUND)
|
||||
set(VLLM_GPU_LANG "CUDA")
|
||||
|
||||
if (NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_CUDA})
|
||||
message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_CUDA} "
|
||||
"expected for CUDA build, saw ${Torch_VERSION} instead.")
|
||||
endif()
|
||||
elseif(HIP_FOUND)
|
||||
set(VLLM_GPU_LANG "HIP")
|
||||
|
||||
# Importing torch recognizes and sets up some HIP/ROCm configuration but does
|
||||
# not let cmake recognize .hip files. In order to get cmake to understand the
|
||||
# .hip extension automatically, HIP must be enabled explicitly.
|
||||
enable_language(HIP)
|
||||
|
||||
# ROCm 5.X and 6.X
|
||||
if (ROCM_VERSION_DEV_MAJOR GREATER_EQUAL 5 AND
|
||||
NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM})
|
||||
message(WARNING "Pytorch version ${TORCH_SUPPORTED_VERSION_ROCM} "
|
||||
"expected for ROCm build, saw ${Torch_VERSION} instead.")
|
||||
endif()
|
||||
else()
|
||||
message(FATAL_ERROR "Can't find CUDA or HIP installation.")
|
||||
endif()
|
||||
|
||||
#
|
||||
# Override the GPU architectures detected by cmake/torch and filter them by
|
||||
# the supported versions for the current language.
|
||||
# The final set of arches is stored in `VLLM_GPU_ARCHES`.
|
||||
#
|
||||
override_gpu_arches(VLLM_GPU_ARCHES
|
||||
${VLLM_GPU_LANG}
|
||||
"${${VLLM_GPU_LANG}_SUPPORTED_ARCHS}")
|
||||
|
||||
#
|
||||
# Query torch for additional GPU compilation flags for the given
|
||||
# `VLLM_GPU_LANG`.
|
||||
# The final set of arches is stored in `VLLM_GPU_FLAGS`.
|
||||
#
|
||||
get_torch_gpu_compiler_flags(VLLM_GPU_FLAGS ${VLLM_GPU_LANG})
|
||||
|
||||
#
|
||||
# Set nvcc parallelism.
|
||||
#
|
||||
if(NVCC_THREADS AND VLLM_GPU_LANG STREQUAL "CUDA")
|
||||
list(APPEND VLLM_GPU_FLAGS "--threads=${NVCC_THREADS}")
|
||||
endif()
|
||||
|
||||
#
|
||||
# Define extension targets
|
||||
#
|
||||
|
||||
#
|
||||
# _C extension
|
||||
#
|
||||
|
||||
set(VLLM_EXT_SRC
|
||||
"csrc/cache_kernels.cu"
|
||||
"csrc/attention/attention_kernels.cu"
|
||||
"csrc/pos_encoding_kernels.cu"
|
||||
"csrc/activation_kernels.cu"
|
||||
"csrc/layernorm_kernels.cu"
|
||||
"csrc/quantization/squeezellm/quant_cuda_kernel.cu"
|
||||
"csrc/quantization/gptq/q_gemm.cu"
|
||||
"csrc/quantization/compressed_tensors/int8_quant_kernels.cu"
|
||||
"csrc/quantization/fp8/common.cu"
|
||||
"csrc/cuda_utils_kernels.cu"
|
||||
"csrc/moe_align_block_size_kernels.cu"
|
||||
"csrc/torch_bindings.cpp")
|
||||
|
||||
if(VLLM_GPU_LANG STREQUAL "CUDA")
|
||||
include(FetchContent)
|
||||
set(CUTLASS_ENABLE_HEADERS_ONLY ON)
|
||||
FetchContent_Declare(
|
||||
cutlass
|
||||
GIT_REPOSITORY https://github.com/nvidia/cutlass.git
|
||||
# CUTLASS 3.5.0
|
||||
GIT_TAG 7d49e6c7e2f8896c47f586706e67e1fb215529dc
|
||||
)
|
||||
FetchContent_MakeAvailable(cutlass)
|
||||
|
||||
list(APPEND VLLM_EXT_SRC
|
||||
"csrc/quantization/aqlm/gemm_kernels.cu"
|
||||
"csrc/quantization/awq/gemm_kernels.cu"
|
||||
"csrc/quantization/marlin/dense/marlin_cuda_kernel.cu"
|
||||
"csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu"
|
||||
"csrc/quantization/gptq_marlin/gptq_marlin.cu"
|
||||
"csrc/quantization/gptq_marlin/gptq_marlin_repack.cu"
|
||||
"csrc/quantization/fp8/fp8_marlin.cu"
|
||||
"csrc/custom_all_reduce.cu"
|
||||
"csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu"
|
||||
"csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu"
|
||||
"csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu")
|
||||
|
||||
#
|
||||
# The CUTLASS kernels for Hopper require sm90a to be enabled.
|
||||
# This is done via the below gencode option, BUT that creates kernels for both sm90 and sm90a.
|
||||
# That adds an extra 17MB to compiled binary, so instead we selectively enable it.
|
||||
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.0)
|
||||
set_source_files_properties(
|
||||
"csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu"
|
||||
PROPERTIES
|
||||
COMPILE_FLAGS
|
||||
"-gencode arch=compute_90a,code=sm_90a")
|
||||
endif()
|
||||
|
||||
endif()
|
||||
|
||||
define_gpu_extension_target(
|
||||
_C
|
||||
DESTINATION vllm
|
||||
LANGUAGE ${VLLM_GPU_LANG}
|
||||
SOURCES ${VLLM_EXT_SRC}
|
||||
COMPILE_FLAGS ${VLLM_GPU_FLAGS}
|
||||
ARCHITECTURES ${VLLM_GPU_ARCHES}
|
||||
INCLUDE_DIRECTORIES ${CUTLASS_INCLUDE_DIR};${CUTLASS_TOOLS_UTIL_INCLUDE_DIR}
|
||||
USE_SABI 3
|
||||
WITH_SOABI)
|
||||
|
||||
#
|
||||
# _moe_C extension
|
||||
#
|
||||
|
||||
set(VLLM_MOE_EXT_SRC
|
||||
"csrc/moe/torch_bindings.cpp"
|
||||
"csrc/moe/topk_softmax_kernels.cu")
|
||||
|
||||
define_gpu_extension_target(
|
||||
_moe_C
|
||||
DESTINATION vllm
|
||||
LANGUAGE ${VLLM_GPU_LANG}
|
||||
SOURCES ${VLLM_MOE_EXT_SRC}
|
||||
COMPILE_FLAGS ${VLLM_GPU_FLAGS}
|
||||
ARCHITECTURES ${VLLM_GPU_ARCHES}
|
||||
USE_SABI 3
|
||||
WITH_SOABI)
|
||||
|
||||
#
|
||||
# _punica_C extension
|
||||
#
|
||||
|
||||
set(VLLM_PUNICA_EXT_SRC
|
||||
"csrc/punica/bgmv/bgmv_bf16_bf16_bf16.cu"
|
||||
"csrc/punica/bgmv/bgmv_bf16_fp32_bf16.cu"
|
||||
"csrc/punica/bgmv/bgmv_fp16_fp16_fp16.cu"
|
||||
"csrc/punica/bgmv/bgmv_fp16_fp32_fp16.cu"
|
||||
"csrc/punica/bgmv/bgmv_fp32_bf16_bf16.cu"
|
||||
"csrc/punica/bgmv/bgmv_fp32_fp16_fp16.cu"
|
||||
"csrc/punica/punica_ops.cu"
|
||||
"csrc/punica/torch_bindings.cpp")
|
||||
|
||||
#
|
||||
# Copy GPU compilation flags+update for punica
|
||||
#
|
||||
set(VLLM_PUNICA_GPU_FLAGS ${VLLM_GPU_FLAGS})
|
||||
list(REMOVE_ITEM VLLM_PUNICA_GPU_FLAGS
|
||||
"-D__CUDA_NO_HALF_OPERATORS__"
|
||||
"-D__CUDA_NO_HALF_CONVERSIONS__"
|
||||
"-D__CUDA_NO_BFLOAT16_CONVERSIONS__"
|
||||
"-D__CUDA_NO_HALF2_OPERATORS__")
|
||||
|
||||
#
|
||||
# Filter out CUDA architectures < 8.0 for punica.
|
||||
#
|
||||
if (${VLLM_GPU_LANG} STREQUAL "CUDA")
|
||||
set(VLLM_PUNICA_GPU_ARCHES)
|
||||
foreach(ARCH ${VLLM_GPU_ARCHES})
|
||||
string_to_ver(CODE_VER ${ARCH})
|
||||
if (CODE_VER GREATER_EQUAL 8.0)
|
||||
list(APPEND VLLM_PUNICA_GPU_ARCHES ${ARCH})
|
||||
endif()
|
||||
endforeach()
|
||||
message(STATUS "Punica target arches: ${VLLM_PUNICA_GPU_ARCHES}")
|
||||
elseif(${VLLM_GPU_LANG} STREQUAL "HIP")
|
||||
set(VLLM_PUNICA_GPU_ARCHES ${VLLM_GPU_ARCHES})
|
||||
message(STATUS "Punica target arches: ${VLLM_PUNICA_GPU_ARCHES}")
|
||||
endif()
|
||||
|
||||
if (VLLM_PUNICA_GPU_ARCHES)
|
||||
define_gpu_extension_target(
|
||||
_punica_C
|
||||
DESTINATION vllm
|
||||
LANGUAGE ${VLLM_GPU_LANG}
|
||||
SOURCES ${VLLM_PUNICA_EXT_SRC}
|
||||
COMPILE_FLAGS ${VLLM_PUNICA_GPU_FLAGS}
|
||||
ARCHITECTURES ${VLLM_PUNICA_GPU_ARCHES}
|
||||
USE_SABI 3
|
||||
WITH_SOABI)
|
||||
else()
|
||||
message(WARNING "Unable to create _punica_C target because none of the "
|
||||
"requested architectures (${VLLM_GPU_ARCHES}) are supported, i.e. >= 8.0")
|
||||
endif()
|
||||
|
||||
#
|
||||
# Add the `default` target which detects which extensions should be
|
||||
# built based on platform/architecture. This is the same logic that
|
||||
# setup.py uses to select which extensions should be built and should
|
||||
# be kept in sync.
|
||||
#
|
||||
# The `default` target makes direct use of cmake easier since knowledge
|
||||
# of which extensions are supported has been factored in, e.g.
|
||||
#
|
||||
# mkdir build && cd build
|
||||
# cmake -G Ninja -DVLLM_PYTHON_EXECUTABLE=`which python3` -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=../vllm ..
|
||||
# cmake --build . --target default
|
||||
#
|
||||
add_custom_target(default)
|
||||
|
||||
if(VLLM_GPU_LANG STREQUAL "CUDA" OR VLLM_GPU_LANG STREQUAL "HIP")
|
||||
message(STATUS "Enabling C extension.")
|
||||
add_dependencies(default _C)
|
||||
|
||||
message(STATUS "Enabling moe extension.")
|
||||
add_dependencies(default _moe_C)
|
||||
|
||||
# Enable punica if -DVLLM_INSTALL_PUNICA_KERNELS=ON or
|
||||
# VLLM_INSTALL_PUNICA_KERNELS is set in the environment and
|
||||
# there are supported target arches.
|
||||
if (VLLM_PUNICA_GPU_ARCHES AND
|
||||
(ENV{VLLM_INSTALL_PUNICA_KERNELS} OR VLLM_INSTALL_PUNICA_KERNELS))
|
||||
message(STATUS "Enabling punica extension.")
|
||||
add_dependencies(default _punica_C)
|
||||
endif()
|
||||
endif()
|
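The `default` target described in the comments above can be exercised directly with CMake; the following is a minimal sketch based on those comments, assuming a CUDA toolchain, Ninja, and the expected PyTorch version are already installed in the active Python environment.

```bash
# Sketch: configure and build the extensions selected by the `default` target.
mkdir build && cd build
cmake -G Ninja \
    -DVLLM_PYTHON_EXECUTABLE=$(which python3) \
    -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=../vllm \
    ..
cmake --build . --target default
```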
CONTRIBUTING.md (56 lines, new file)
@@ -0,0 +1,56 @@
|
||||
# Contributing to vLLM
|
||||
|
||||
Thank you for your interest in contributing to vLLM!
|
||||
Our community is open to everyone and welcomes all kinds of contributions, no matter how small or large.
|
||||
There are several ways you can contribute to the project:
|
||||
|
||||
- Identify and report any issues or bugs.
|
||||
- Request or add a new model.
|
||||
- Suggest or implement new features.
|
||||
|
||||
However, remember that contributions aren't just about code.
|
||||
We believe in the power of community support; thus, answering queries, assisting others, and enhancing the documentation are highly regarded and beneficial contributions.
|
||||
|
||||
Finally, one of the most impactful ways to support us is by raising awareness about vLLM.
|
||||
Talk about it in your blog posts, highlighting how it's driving your incredible projects.
|
||||
Express your support on Twitter if vLLM aids you, or simply offer your appreciation by starring our repository.
|
||||
|
||||
|
||||
## Setup for development
|
||||
|
||||
### Build from source
|
||||
|
||||
```bash
|
||||
pip install -e . # This may take several minutes.
|
||||
```
|
||||
|
||||
### Testing
|
||||
|
||||
```bash
|
||||
pip install -r requirements-dev.txt
|
||||
|
||||
# linting and formatting
|
||||
bash format.sh
|
||||
# Static type checking
|
||||
mypy
|
||||
# Unit tests
|
||||
pytest tests/
|
||||
```
|
||||
**Note:** Currently, the repository does not pass the mypy tests.
|
||||
|
||||
|
||||
## Contributing Guidelines
|
||||
|
||||
### Issue Reporting
|
||||
|
||||
If you encounter a bug or have a feature request, please check our issues page first to see if someone else has already reported it.
|
||||
If not, please file a new issue, providing as much relevant information as possible.
|
||||
|
||||
### Pull Requests & Code Reviews
|
||||
|
||||
Please check the PR checklist in the [PR template](.github/PULL_REQUEST_TEMPLATE.md) for a detailed guide to contributing.
|
||||
|
||||
### Thank You
|
||||
|
||||
Finally, thank you for taking the time to read these guidelines and for your interest in contributing to vLLM.
|
||||
Your contributions make vLLM a great tool for everyone!
|
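Putting the setup and testing steps above together, a typical first-time development loop might look like the sketch below; the clone URL is the project's public repository, and the individual commands are taken from the guide above.

```bash
# Sketch: end-to-end developer setup following the steps above.
git clone https://github.com/vllm-project/vllm.git
cd vllm
pip install -e .                      # build from source (may take several minutes)
pip install -r requirements-dev.txt
bash format.sh                        # linting and formatting
pytest tests/                         # unit tests
```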
Dockerfile (210 lines, new file)
@@ -0,0 +1,210 @@
|
||||
# The vLLM Dockerfile is used to construct a vLLM image that can be directly used
|
||||
# to run the OpenAI compatible server.
|
||||
|
||||
# Please update any changes made here to
|
||||
# docs/source/dev/dockerfile/dockerfile.rst and
|
||||
# docs/source/assets/dev/dockerfile-stages-dependency.png
|
||||
|
||||
ARG CUDA_VERSION=12.4.1
|
||||
#################### BASE BUILD IMAGE ####################
|
||||
# prepare basic build environment
|
||||
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 AS base
|
||||
|
||||
ARG CUDA_VERSION=12.4.1
|
||||
ARG PYTHON_VERSION=3
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
|
||||
&& echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
|
||||
&& apt-get update -y \
|
||||
&& apt-get install -y ccache software-properties-common \
|
||||
&& add-apt-repository ppa:deadsnakes/ppa \
|
||||
&& apt-get update -y \
|
||||
&& apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv python3-pip \
|
||||
&& if [ "${PYTHON_VERSION}" != "3" ]; then update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1; fi \
|
||||
&& python3 --version \
|
||||
&& python3 -m pip --version
|
||||
|
||||
RUN apt-get update -y \
|
||||
&& apt-get install -y python3-pip git curl sudo
|
||||
|
||||
# Workaround for https://github.com/openai/triton/issues/2507 and
|
||||
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
|
||||
# this won't be needed for future versions of this docker image
|
||||
# or future versions of triton.
|
||||
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
# install build and runtime dependencies
|
||||
COPY requirements-common.txt requirements-common.txt
|
||||
COPY requirements-cuda.txt requirements-cuda.txt
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m pip install -r requirements-cuda.txt
|
||||
|
||||
COPY requirements-mamba.txt requirements-mamba.txt
|
||||
RUN python3 -m pip install packaging
|
||||
RUN python3 -m pip install -r requirements-mamba.txt
|
||||
|
||||
# cuda arch list used by torch
|
||||
# can be useful for both `dev` and `test`
|
||||
# explicitly set the list to avoid issues with torch 2.2
|
||||
# see https://github.com/pytorch/pytorch/pull/123243
|
||||
ARG torch_cuda_arch_list='7.0 7.5 8.0 8.6 8.9 9.0+PTX'
|
||||
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
|
||||
#################### BASE BUILD IMAGE ####################
|
||||
|
||||
#################### WHEEL BUILD IMAGE ####################
|
||||
FROM base AS build
|
||||
|
||||
ARG PYTHON_VERSION=3
|
||||
|
||||
# install build dependencies
|
||||
COPY requirements-build.txt requirements-build.txt
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m pip install -r requirements-build.txt
|
||||
|
||||
# install compiler cache to speed up compilation leveraging local or remote caching
|
||||
RUN apt-get update -y && apt-get install -y ccache
|
||||
|
||||
# files and directories related to build wheels
|
||||
COPY csrc csrc
|
||||
COPY setup.py setup.py
|
||||
COPY cmake cmake
|
||||
COPY CMakeLists.txt CMakeLists.txt
|
||||
COPY requirements-common.txt requirements-common.txt
|
||||
COPY requirements-cuda.txt requirements-cuda.txt
|
||||
COPY pyproject.toml pyproject.toml
|
||||
COPY vllm vllm
|
||||
|
||||
# max jobs used by Ninja to build extensions
|
||||
ARG max_jobs=2
|
||||
ENV MAX_JOBS=${max_jobs}
|
||||
# number of threads used by nvcc
|
||||
ARG nvcc_threads=8
|
||||
ENV NVCC_THREADS=$nvcc_threads
|
||||
# make sure punica kernels are built (for LoRA)
|
||||
ENV VLLM_INSTALL_PUNICA_KERNELS=1
|
||||
|
||||
ARG buildkite_commit
|
||||
ENV BUILDKITE_COMMIT=${buildkite_commit}
|
||||
|
||||
ARG USE_SCCACHE
|
||||
# if USE_SCCACHE is set, use sccache to speed up compilation
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
if [ "$USE_SCCACHE" = "1" ]; then \
|
||||
echo "Installing sccache..." \
|
||||
&& curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
|
||||
&& tar -xzf sccache.tar.gz \
|
||||
&& sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \
|
||||
&& rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \
|
||||
&& export SCCACHE_BUCKET=vllm-build-sccache \
|
||||
&& export SCCACHE_REGION=us-west-2 \
|
||||
&& export CMAKE_BUILD_TYPE=Release \
|
||||
&& sccache --show-stats \
|
||||
&& python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38 \
|
||||
&& sccache --show-stats; \
|
||||
fi
|
||||
|
||||
ENV CCACHE_DIR=/root/.cache/ccache
|
||||
RUN --mount=type=cache,target=/root/.cache/ccache \
|
||||
--mount=type=cache,target=/root/.cache/pip \
|
||||
if [ "$USE_SCCACHE" != "1" ]; then \
|
||||
python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38; \
|
||||
fi
|
||||
|
||||
# check the size of the wheel, we cannot upload wheels larger than 100MB
|
||||
COPY .buildkite/check-wheel-size.py check-wheel-size.py
|
||||
RUN python3 check-wheel-size.py dist
|
||||
|
||||
#################### WHEEL BUILD IMAGE ####################
|
||||
|
||||
#################### DEV IMAGE ####################
|
||||
FROM base AS dev
|
||||
|
||||
COPY requirements-lint.txt requirements-lint.txt
|
||||
COPY requirements-test.txt requirements-test.txt
|
||||
COPY requirements-dev.txt requirements-dev.txt
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m pip install -r requirements-dev.txt
|
||||
|
||||
#################### DEV IMAGE ####################
|
||||
#################### MAMBA Build IMAGE ####################
|
||||
FROM dev AS mamba-builder
|
||||
# max jobs used for build
|
||||
ARG max_jobs=2
|
||||
ENV MAX_JOBS=${max_jobs}
|
||||
|
||||
WORKDIR /usr/src/mamba
|
||||
|
||||
COPY requirements-mamba.txt requirements-mamba.txt
|
||||
|
||||
# Download the wheel or build it if a pre-compiled release doesn't exist
|
||||
RUN pip --verbose wheel -r requirements-mamba.txt \
|
||||
--no-build-isolation --no-deps --no-cache-dir
|
||||
|
||||
#################### MAMBA Build IMAGE ####################
|
||||
|
||||
#################### vLLM installation IMAGE ####################
|
||||
# image with vLLM installed
|
||||
FROM nvidia/cuda:${CUDA_VERSION}-base-ubuntu22.04 AS vllm-base
|
||||
ARG CUDA_VERSION=12.4.1
|
||||
WORKDIR /vllm-workspace
|
||||
|
||||
RUN apt-get update -y \
|
||||
&& apt-get install -y python3-pip git vim
|
||||
|
||||
# Workaround for https://github.com/openai/triton/issues/2507 and
|
||||
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
|
||||
# this won't be needed for future versions of this docker image
|
||||
# or future versions of triton.
|
||||
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
|
||||
|
||||
# install vllm wheel first, so that torch etc will be installed
|
||||
RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
|
||||
--mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m pip install dist/*.whl --verbose
|
||||
|
||||
RUN --mount=type=bind,from=mamba-builder,src=/usr/src/mamba,target=/usr/src/mamba \
|
||||
--mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m pip install /usr/src/mamba/*.whl --no-cache-dir
|
||||
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.0.9/flashinfer-0.0.9+cu121torch2.3-cp310-cp310-linux_x86_64.whl
|
||||
#################### vLLM installation IMAGE ####################
|
||||
|
||||
|
||||
#################### TEST IMAGE ####################
|
||||
# image to run unit testing suite
|
||||
# note that this uses vllm installed by `pip`
|
||||
FROM vllm-base AS test
|
||||
|
||||
ADD . /vllm-workspace/
|
||||
|
||||
# install development dependencies (for testing)
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
python3 -m pip install -r requirements-dev.txt
|
||||
|
||||
# The doc build requires the source code, so we hide it inside `test_docs/`
# to keep this copy of the source from being imported by the other tests.
|
||||
RUN mkdir test_docs
|
||||
RUN mv docs test_docs/
|
||||
RUN mv vllm test_docs/
|
||||
|
||||
#################### TEST IMAGE ####################
|
||||
|
||||
#################### OPENAI API SERVER ####################
|
||||
# openai api server alternative
|
||||
FROM vllm-base AS vllm-openai
|
||||
|
||||
# install additional dependencies for openai api server
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip install accelerate hf_transfer 'modelscope!=1.15.0'
|
||||
|
||||
ENV VLLM_USAGE_SOURCE production-docker-image
|
||||
|
||||
ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]
|
||||
#################### OPENAI API SERVER ####################
|
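As a usage sketch for the Dockerfile above: the `--mount=type=cache` steps require BuildKit, and the image tag, build-argument values, and model name below are illustrative assumptions rather than fixed requirements.

```bash
# Build the OpenAI-compatible server image; max_jobs and nvcc_threads map to
# the ARGs declared in the Dockerfile and can be tuned to the build machine.
DOCKER_BUILDKIT=1 docker build . \
    --target vllm-openai \
    --build-arg max_jobs=8 \
    --build-arg nvcc_threads=8 \
    -t vllm/vllm-openai

# Run the server; the entrypoint is vllm.entrypoints.openai.api_server,
# so its flags (e.g. --model) can be appended directly.
docker run --gpus all -p 8000:8000 vllm/vllm-openai --model facebook/opt-125m
```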
40
Dockerfile.cpu
Normal file
@@ -0,0 +1,40 @@
|
||||
# This vLLM Dockerfile is used to construct image that can build and run vLLM on x86 CPU platform.
|
||||
|
||||
FROM ubuntu:22.04 AS cpu-test-1
|
||||
|
||||
RUN apt-get update -y \
|
||||
&& apt-get install -y git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 \
|
||||
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12
|
||||
|
||||
# https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/performance_tuning/tuning_guide.html
|
||||
# intel-openmp provides additional performance improvement vs. openmp
|
||||
# tcmalloc provides better memory allocation efficiency, e.g., holding memory in caches to speed up access to commonly-used objects.
|
||||
RUN pip install intel-openmp
|
||||
|
||||
ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so:$LD_PRELOAD"
|
||||
|
||||
|
||||
RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_dev/cpu/intel_extension_for_pytorch-2.3.100%2Bgit0eb3473-cp310-cp310-linux_x86_64.whl
|
||||
|
||||
RUN pip install --upgrade pip \
|
||||
&& pip install wheel packaging ninja "setuptools>=49.4.0" numpy
|
||||
|
||||
FROM cpu-test-1 AS build
|
||||
|
||||
COPY ./ /workspace/vllm
|
||||
|
||||
WORKDIR /workspace/vllm
|
||||
|
||||
RUN pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
|
||||
# Support for building with non-AVX512 vLLM: docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" ...
|
||||
ARG VLLM_CPU_DISABLE_AVX512
|
||||
ENV VLLM_CPU_DISABLE_AVX512=${VLLM_CPU_DISABLE_AVX512}
|
||||
|
||||
RUN VLLM_TARGET_DEVICE=cpu python3 setup.py install
|
||||
|
||||
WORKDIR /workspace/
|
||||
|
||||
RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks
|
||||
|
||||
ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]
|
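A build/run sketch for the CPU image, using the `VLLM_CPU_DISABLE_AVX512` build argument mentioned in the Dockerfile; the tag, model, and shared-memory size are illustrative, and extra runtime flags may be needed depending on the host.

```bash
# Build for a host without AVX-512
docker build -f Dockerfile.cpu --build-arg VLLM_CPU_DISABLE_AVX512="true" -t vllm-cpu .

# The entrypoint is the OpenAI-compatible API server
docker run --shm-size=4g -p 8000:8000 vllm-cpu --model facebook/opt-125m
```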
36
Dockerfile.neuron
Normal file
@@ -0,0 +1,36 @@
|
||||
# default base image
|
||||
ARG BASE_IMAGE="763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-inference-neuronx:2.1.1-neuronx-py310-sdk2.17.0-ubuntu20.04"
|
||||
|
||||
FROM $BASE_IMAGE
|
||||
|
||||
RUN echo "Base image is $BASE_IMAGE"
|
||||
|
||||
# Install some basic utilities
|
||||
RUN apt-get update && apt-get install python3 python3-pip -y
|
||||
|
||||
### Mount Point ###
|
||||
# When launching the container, mount the code directory to /app
|
||||
ARG APP_MOUNT=/app
|
||||
VOLUME [ ${APP_MOUNT} ]
|
||||
WORKDIR ${APP_MOUNT}
|
||||
|
||||
RUN python3 -m pip install --upgrade pip
|
||||
RUN python3 -m pip install --no-cache-dir fastapi ninja tokenizers pandas
|
||||
RUN python3 -m pip install sentencepiece transformers==4.36.2 -U
|
||||
RUN python3 -m pip install transformers-neuronx --extra-index-url=https://pip.repos.neuron.amazonaws.com -U
|
||||
RUN python3 -m pip install --pre neuronx-cc==2.12.* --extra-index-url=https://pip.repos.neuron.amazonaws.com -U
|
||||
|
||||
COPY ./vllm /app/vllm/vllm
|
||||
COPY ./setup.py /app/vllm/setup.py
|
||||
COPY ./requirements-common.txt /app/vllm/requirements-common.txt
|
||||
COPY ./requirements-neuron.txt /app/vllm/requirements-neuron.txt
|
||||
|
||||
RUN cd /app/vllm \
|
||||
&& python3 -m pip install -U -r requirements-neuron.txt
|
||||
|
||||
ENV VLLM_TARGET_DEVICE neuron
|
||||
RUN cd /app/vllm \
|
||||
&& pip install -e . \
|
||||
&& cd ..
|
||||
|
||||
CMD ["/bin/bash"]
|
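A rough sketch of how this image might be used; the tag is arbitrary, and Neuron device passthrough flags are host-specific, so they are omitted here.

```bash
# Build on top of the default Neuron DLC base image (override BASE_IMAGE if needed)
docker build -f Dockerfile.neuron -t vllm-neuron .

# Mount the working copy at /app, as suggested by the Dockerfile comment above
docker run -v "$PWD":/app -it vllm-neuron
```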
26
Dockerfile.openvino
Normal file
@@ -0,0 +1,26 @@
|
||||
# The vLLM Dockerfile is used to construct vLLM image that can be directly used
|
||||
# to run the OpenAI compatible server.
|
||||
|
||||
FROM ubuntu:22.04 AS dev
|
||||
|
||||
RUN apt-get update -y && \
|
||||
apt-get install -y python3-pip git
|
||||
WORKDIR /workspace
|
||||
|
||||
# copy requirements
|
||||
COPY requirements-build.txt /workspace/vllm/
|
||||
COPY requirements-common.txt /workspace/vllm/
|
||||
COPY requirements-openvino.txt /workspace/vllm/
|
||||
|
||||
COPY vllm/ /workspace/vllm/vllm
|
||||
COPY setup.py /workspace/vllm/
|
||||
|
||||
# install build requirements
|
||||
RUN PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu" python3 -m pip install -r /workspace/vllm/requirements-build.txt
|
||||
# build vLLM with OpenVINO backend
|
||||
RUN PIP_PRE=1 PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu https://storage.openvinotoolkit.org/simple/wheels/nightly/" VLLM_TARGET_DEVICE="openvino" python3 -m pip install /workspace/vllm/
|
||||
|
||||
COPY examples/ /workspace/vllm/examples
|
||||
COPY benchmarks/ /workspace/vllm/benchmarks
|
||||
|
||||
CMD ["/bin/bash"]
|
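A minimal usage sketch (the tag is arbitrary); because the image's CMD is `/bin/bash`, the copied `examples/` and `benchmarks/` directories can be explored interactively.

```bash
docker build -f Dockerfile.openvino -t vllm-openvino .
docker run -it vllm-openvino
```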
22
Dockerfile.ppc64le
Normal file
@@ -0,0 +1,22 @@
|
||||
FROM mambaorg/micromamba
|
||||
ARG MAMBA_DOCKERFILE_ACTIVATE=1
|
||||
USER root
|
||||
|
||||
RUN apt-get update -y && apt-get install -y git wget vim numactl gcc-12 g++-12 protobuf-compiler libprotobuf-dev && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12
|
||||
|
||||
# Some packages in requirements-cpu are installed here
|
||||
# IBM provides optimized packages for ppc64le processors in the open-ce project for mamba
|
||||
# Currently these may not be available for venv or pip directly
|
||||
RUN micromamba install -y -n base -c https://ftp.osuosl.org/pub/open-ce/1.11.0-p10/ -c defaults python=3.10 pytorch-cpu=2.1.2 torchvision-cpu=0.16.2 && micromamba clean --all --yes
|
||||
|
||||
COPY ./ /workspace/vllm
|
||||
|
||||
WORKDIR /workspace/vllm
|
||||
|
||||
# These packages will be in rocketce eventually
|
||||
RUN pip install -v -r requirements-cpu.txt --prefer-binary --extra-index-url https://repo.fury.io/mgiessing
|
||||
|
||||
RUN VLLM_TARGET_DEVICE=cpu python3 setup.py install
|
||||
|
||||
WORKDIR /vllm-workspace
|
||||
ENTRYPOINT ["/opt/conda/bin/python3", "-m", "vllm.entrypoints.openai.api_server"]
|
204
Dockerfile.rocm
Normal file
@@ -0,0 +1,204 @@
|
||||
# Default ROCm 6.1 base image
|
||||
ARG BASE_IMAGE="rocm/pytorch:rocm6.1.2_ubuntu20.04_py3.9_pytorch_staging"
|
||||
|
||||
# Tested and supported base rocm/pytorch images
|
||||
ARG ROCm_5_7_BASE="rocm/pytorch:rocm5.7_ubuntu20.04_py3.9_pytorch_2.0.1" \
|
||||
ROCm_6_0_BASE="rocm/pytorch:rocm6.0_ubuntu20.04_py3.9_pytorch_2.1.1" \
|
||||
ROCM_6_1_BASE="rocm/pytorch:rocm6.1.2_ubuntu20.04_py3.9_pytorch_staging"
|
||||
|
||||
# Default ROCm ARCHes to build vLLM for.
|
||||
ARG PYTORCH_ROCM_ARCH="gfx908;gfx90a;gfx942;gfx1100"
|
||||
|
||||
# Whether to build CK-based flash-attention
|
||||
# If 0, will not build flash attention
|
||||
# This is useful for gfx target where flash-attention is not supported
|
||||
# (i.e. those that do not appear in `FA_GFX_ARCHS`)
|
||||
# Triton FA is used by default on ROCm now so this is unnecessary.
|
||||
ARG BUILD_FA="1"
|
||||
ARG FA_GFX_ARCHS="gfx90a;gfx942"
|
||||
ARG FA_BRANCH="ae7928c"
|
||||
|
||||
# Whether to build triton on rocm
|
||||
ARG BUILD_TRITON="1"
|
||||
ARG TRITON_BRANCH="0ef1848"
|
||||
|
||||
### Base image build stage
|
||||
FROM $BASE_IMAGE AS base
|
||||
|
||||
# Import arg(s) defined before this build stage
|
||||
ARG PYTORCH_ROCM_ARCH
|
||||
|
||||
# Install some basic utilities
|
||||
RUN apt-get update && apt-get install python3 python3-pip -y
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl \
|
||||
ca-certificates \
|
||||
sudo \
|
||||
git \
|
||||
bzip2 \
|
||||
libx11-6 \
|
||||
build-essential \
|
||||
wget \
|
||||
unzip \
|
||||
tmux \
|
||||
ccache \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# When launching the container, mount the code directory to /vllm-workspace
|
||||
ARG APP_MOUNT=/vllm-workspace
|
||||
WORKDIR ${APP_MOUNT}
|
||||
|
||||
RUN pip install --upgrade pip
|
||||
# Remove sccache so it doesn't interfere with ccache
|
||||
# TODO: implement sccache support across components
|
||||
RUN apt-get purge -y sccache; pip uninstall -y sccache; rm -f "$(which sccache)"
|
||||
# Install torch == 2.5.0 on ROCm
|
||||
RUN case "$(ls /opt | grep -Po 'rocm-[0-9]\.[0-9]')" in \
|
||||
*"rocm-5.7"*) \
|
||||
pip uninstall -y torch torchaudio torchvision \
|
||||
&& pip install --no-cache-dir --pre \
|
||||
torch==2.5.0.dev20240710 torchaudio==2.4.0.dev20240710 \
|
||||
torchvision==0.20.0.dev20240710 \
|
||||
--index-url https://download.pytorch.org/whl/nightly/rocm5.7;; \
|
||||
*"rocm-6.0"*) \
|
||||
pip uninstall -y torch torchaudio torchvision \
|
||||
&& pip install --no-cache-dir --pre \
|
||||
torch==2.5.0.dev20240710 torchaudio==2.4.0.dev20240710 \
|
||||
torchvision==0.20.0.dev20240710 \
|
||||
--index-url https://download.pytorch.org/whl/nightly/rocm6.0;; \
|
||||
*"rocm-6.1"*) \
|
||||
pip uninstall -y torch torchaudio torchvision \
|
||||
&& pip install --no-cache-dir --pre \
|
||||
torch==2.5.0.dev20240710 torchaudio==2.4.0.dev20240710 \
|
||||
torchvision==0.20.0.dev20240710 \
|
||||
--index-url https://download.pytorch.org/whl/nightly/rocm6.1;; \
|
||||
*) ;; esac
|
||||
|
||||
ENV LLVM_SYMBOLIZER_PATH=/opt/rocm/llvm/bin/llvm-symbolizer
|
||||
ENV PATH=$PATH:/opt/rocm/bin:/libtorch/bin:
|
||||
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm/lib/:/libtorch/lib:
|
||||
ENV CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/libtorch/include:/libtorch/include/torch/csrc/api/include/:/opt/rocm/include/:
|
||||
|
||||
ENV PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}
|
||||
ENV CCACHE_DIR=/root/.cache/ccache
|
||||
|
||||
|
||||
### AMD-SMI build stage
|
||||
FROM base AS build_amdsmi
|
||||
# Build amdsmi wheel always
|
||||
RUN cd /opt/rocm/share/amd_smi \
|
||||
&& pip wheel . --wheel-dir=/install
|
||||
|
||||
|
||||
### Flash-Attention wheel build stage
|
||||
FROM base AS build_fa
|
||||
ARG BUILD_FA
|
||||
ARG FA_GFX_ARCHS
|
||||
ARG FA_BRANCH
|
||||
# Build ROCm flash-attention wheel if `BUILD_FA = 1`
|
||||
RUN --mount=type=cache,target=${CCACHE_DIR} \
|
||||
if [ "$BUILD_FA" = "1" ]; then \
|
||||
mkdir -p libs \
|
||||
&& cd libs \
|
||||
&& git clone https://github.com/ROCm/flash-attention.git \
|
||||
&& cd flash-attention \
|
||||
&& git checkout "${FA_BRANCH}" \
|
||||
&& git submodule update --init \
|
||||
&& case "$(ls /opt | grep -Po 'rocm-[0-9]\.[0-9]')" in \
|
||||
*"rocm-5.7"*) \
|
||||
export VLLM_TORCH_PATH="$(python3 -c 'import torch; print(torch.__path__[0])')" \
|
||||
&& patch "${VLLM_TORCH_PATH}"/utils/hipify/hipify_python.py hipify_patch.patch;; \
|
||||
*) ;; esac \
|
||||
&& GPU_ARCHS="${FA_GFX_ARCHS}" python3 setup.py bdist_wheel --dist-dir=/install; \
|
||||
# Create an empty directory otherwise as later build stages expect one
|
||||
else mkdir -p /install; \
|
||||
fi
|
||||
|
||||
|
||||
### Triton wheel build stage
|
||||
FROM base AS build_triton
|
||||
ARG BUILD_TRITON
|
||||
ARG TRITON_BRANCH
|
||||
# Build triton wheel if `BUILD_TRITON = 1`
|
||||
RUN --mount=type=cache,target=${CCACHE_DIR} \
|
||||
if [ "$BUILD_TRITON" = "1" ]; then \
|
||||
mkdir -p libs \
|
||||
&& cd libs \
|
||||
&& git clone https://github.com/OpenAI/triton.git \
|
||||
&& cd triton \
|
||||
&& git checkout "${TRITON_BRANCH}" \
|
||||
&& cd python \
|
||||
&& python3 setup.py bdist_wheel --dist-dir=/install; \
|
||||
# Create an empty directory otherwise as later build stages expect one
|
||||
else mkdir -p /install; \
|
||||
fi
|
||||
|
||||
|
||||
### Final vLLM build stage
|
||||
FROM base AS final
|
||||
# Import the vLLM development directory from the build context
|
||||
COPY . .
|
||||
|
||||
# Error related to odd state for numpy 1.20.3 where there is no METADATA etc, but an extra LICENSES_bundled.txt.
|
||||
# Manually remove it so that later steps of numpy upgrade can continue
|
||||
RUN case "$(which python3)" in \
|
||||
*"/opt/conda/envs/py_3.9"*) \
|
||||
rm -rf /opt/conda/envs/py_3.9/lib/python3.9/site-packages/numpy-1.20.3.dist-info/;; \
|
||||
*) ;; esac
|
||||
|
||||
# Package upgrades for useful functionality or to avoid dependency issues
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
pip install --upgrade numba scipy huggingface-hub[cli]
|
||||
|
||||
# Make sure punica kernels are built (for LoRA)
|
||||
ENV VLLM_INSTALL_PUNICA_KERNELS=1
|
||||
# Workaround for ray >= 2.10.0
|
||||
ENV RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1
|
||||
# Silences the HF Tokenizers warning
|
||||
ENV TOKENIZERS_PARALLELISM=false
|
||||
|
||||
RUN --mount=type=cache,target=${CCACHE_DIR} \
|
||||
--mount=type=cache,target=/root/.cache/pip \
|
||||
pip install -U -r requirements-rocm.txt \
|
||||
&& case "$(ls /opt | grep -Po 'rocm-[0-9]\.[0-9]')" in \
|
||||
*"rocm-6.0"*) \
|
||||
patch /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h rocm_patch/rocm_bf16.patch;; \
|
||||
*"rocm-6.1"*) \
|
||||
# Bring in upgrades to HIP graph earlier than ROCm 6.2 for vLLM
|
||||
wget -N https://github.com/ROCm/vllm/raw/fa78403/rocm_patch/libamdhip64.so.6 -P rocm_patch \
|
||||
&& cp rocm_patch/libamdhip64.so.6 /opt/rocm/lib/libamdhip64.so.6 \
|
||||
# Prevent interference if torch bundles its own HIP runtime
|
||||
&& rm -f "$(python3 -c 'import torch; print(torch.__path__[0])')"/lib/libamdhip64.so* || true;; \
|
||||
*) ;; esac \
|
||||
&& python3 setup.py clean --all \
|
||||
&& python3 setup.py develop
|
||||
|
||||
# Copy amdsmi wheel into final image
|
||||
RUN --mount=type=bind,from=build_amdsmi,src=/install,target=/install \
|
||||
mkdir -p libs \
|
||||
&& cp /install/*.whl libs \
|
||||
# Preemptively uninstall to avoid same-version no-installs
|
||||
&& pip uninstall -y amdsmi;
|
||||
|
||||
# Copy triton wheel(s) into final image if they were built
|
||||
RUN --mount=type=bind,from=build_triton,src=/install,target=/install \
|
||||
mkdir -p libs \
|
||||
&& if ls /install/*.whl; then \
|
||||
cp /install/*.whl libs \
|
||||
# Preemptively uninstall to avoid same-version no-installs
|
||||
&& pip uninstall -y triton; fi
|
||||
|
||||
# Copy flash-attn wheel(s) into final image if they were built
|
||||
RUN --mount=type=bind,from=build_fa,src=/install,target=/install \
|
||||
mkdir -p libs \
|
||||
&& if ls /install/*.whl; then \
|
||||
cp /install/*.whl libs \
|
||||
# Preemptively uninstall to avoid same-version no-installs
|
||||
&& pip uninstall -y flash-attn; fi
|
||||
|
||||
# Install wheels that were built to the final image
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
if ls libs/*.whl; then \
|
||||
pip install libs/*.whl; fi
|
||||
|
||||
CMD ["/bin/bash"]
|
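A build/run sketch for the ROCm image; `BUILD_FA` corresponds to the ARG declared above, the tag is arbitrary, and the device/group flags are the usual ROCm container settings but may vary with the host driver setup.

```bash
# Example: build for a gfx target without CK flash-attention support
docker build -f Dockerfile.rocm --build-arg BUILD_FA="0" -t vllm-rocm .

# Expose the AMD GPU device nodes to the container
docker run -it --device=/dev/kfd --device=/dev/dri --group-add video vllm-rocm
```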
23
Dockerfile.tpu
Normal file
@@ -0,0 +1,23 @@
|
||||
ARG NIGHTLY_DATE="20240601"
|
||||
ARG BASE_IMAGE="us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:nightly_3.10_tpuvm_$NIGHTLY_DATE"
|
||||
|
||||
FROM $BASE_IMAGE
|
||||
WORKDIR /workspace
|
||||
|
||||
# Install aiohttp separately to avoid build errors.
|
||||
RUN pip install aiohttp
|
||||
# Install the TPU and Pallas dependencies.
|
||||
RUN pip install torch_xla[tpu] -f https://storage.googleapis.com/libtpu-releases/index.html
|
||||
RUN pip install torch_xla[pallas] -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html
|
||||
|
||||
# Build vLLM.
|
||||
COPY . /workspace/vllm
|
||||
ENV VLLM_TARGET_DEVICE="tpu"
|
||||
RUN cd /workspace/vllm && python setup.py develop
|
||||
|
||||
# Re-install outlines to avoid dependency errors.
|
||||
# The outlines version must follow requirements-common.txt.
|
||||
RUN pip uninstall outlines -y
|
||||
RUN pip install "outlines>=0.0.43"
|
||||
|
||||
CMD ["/bin/bash"]
|
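A build sketch for the TPU image; `NIGHTLY_DATE` is the ARG declared above, the tag is arbitrary, and the run flags shown are a common TPU-VM setup rather than a fixed requirement.

```bash
# Pin the XLA nightly base image via NIGHTLY_DATE
docker build -f Dockerfile.tpu --build-arg NIGHTLY_DATE="20240601" -t vllm-tpu .

# TPU runtime access typically requires privileged mode on a TPU VM
docker run --privileged --net host -it vllm-tpu
```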
22
Dockerfile.xpu
Normal file
@@ -0,0 +1,22 @@
|
||||
FROM intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04
|
||||
|
||||
RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
|
||||
echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
|
||||
chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
|
||||
rm /etc/apt/sources.list.d/intel-graphics.list && \
|
||||
wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
|
||||
echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
|
||||
chmod 644 /usr/share/keyrings/intel-graphics.gpg
|
||||
|
||||
RUN apt-get update -y \
|
||||
&& apt-get install -y curl libicu70 lsb-release git wget vim numactl python3 python3-pip
|
||||
|
||||
COPY ./ /workspace/vllm
|
||||
|
||||
WORKDIR /workspace/vllm
|
||||
|
||||
RUN pip install -v -r requirements-xpu.txt
|
||||
|
||||
RUN VLLM_TARGET_DEVICE=xpu python3 setup.py install
|
||||
|
||||
CMD ["/bin/bash"]
|
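A build/run sketch for the XPU (Intel GPU) image; the tag is arbitrary, and passing `/dev/dri` through is the usual way to expose Intel GPUs, though the exact device setup depends on the host.

```bash
docker build -f Dockerfile.xpu -t vllm-xpu .
docker run -it --device /dev/dri vllm-xpu
```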
201
LICENSE
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
10
MANIFEST.in
Normal file
@@ -0,0 +1,10 @@
|
||||
include LICENSE
|
||||
include requirements-common.txt
|
||||
include requirements-cuda.txt
|
||||
include requirements-rocm.txt
|
||||
include requirements-neuron.txt
|
||||
include requirements-cpu.txt
|
||||
include CMakeLists.txt
|
||||
|
||||
recursive-include cmake *
|
||||
recursive-include csrc *
|
174
README.md
@@ -1,72 +1,116 @@
|
||||
# CacheFlow
|
||||
<p align="center">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/vllm-project/vllm/main/docs/source/assets/logos/vllm-logo-text-dark.png">
|
||||
<img alt="vLLM" src="https://raw.githubusercontent.com/vllm-project/vllm/main/docs/source/assets/logos/vllm-logo-text-light.png" width=55%>
|
||||
</picture>
|
||||
</p>
|
||||
|
||||
## Installation
|
||||
<h3 align="center">
|
||||
Easy, fast, and cheap LLM serving for everyone
|
||||
</h3>
|
||||
|
||||
<p align="center">
|
||||
| <a href="https://docs.vllm.ai"><b>Documentation</b></a> | <a href="https://vllm.ai"><b>Blog</b></a> | <a href="https://arxiv.org/abs/2309.06180"><b>Paper</b></a> | <a href="https://discord.gg/jz7wjKhh6g"><b>Discord</b></a> |
|
||||
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
*Latest News* 🔥
|
||||
- [2024/06] We hosted [the fourth vLLM meetup](https://lu.ma/agivllm) with Cloudflare and BentoML! Please find the meetup slides [here](https://docs.google.com/presentation/d/1iJ8o7V2bQEi0BFEljLTwc5G1S10_Rhv3beed5oB0NJ4/edit?usp=sharing).
|
||||
- [2024/04] We hosted [the third vLLM meetup](https://robloxandvllmmeetup2024.splashthat.com/) with Roblox! Please find the meetup slides [here](https://docs.google.com/presentation/d/1A--47JAK4BJ39t954HyTkvtfwn0fkqtsL8NGFuslReM/edit?usp=sharing).
|
||||
- [2024/01] We hosted [the second vLLM meetup](https://lu.ma/ygxbpzhl) with IBM! Please find the meetup slides [here](https://docs.google.com/presentation/d/12mI2sKABnUw5RBWXDYY-HtHth4iMSNcEoQ10jDQbxgA/edit?usp=sharing).
|
||||
- [2023/10] We hosted [the first vLLM meetup](https://lu.ma/first-vllm-meetup) with a16z! Please find the meetup slides [here](https://docs.google.com/presentation/d/1QL-XPFXiFpDBh86DbEegFXBXFXjix4v032GhShbKf3s/edit?usp=sharing).
|
||||
- [2023/08] We would like to express our sincere gratitude to [Andreessen Horowitz](https://a16z.com/2023/08/30/supporting-the-open-source-ai-community/) (a16z) for providing a generous grant to support the open-source development and research of vLLM.
|
||||
- [2023/06] We officially released vLLM! FastChat-vLLM integration has powered [LMSYS Vicuna and Chatbot Arena](https://chat.lmsys.org) since mid-April. Check out our [blog post](https://vllm.ai).
|
||||
|
||||
---
|
||||
## About
|
||||
vLLM is a fast and easy-to-use library for LLM inference and serving.
|
||||
|
||||
vLLM is fast with:
|
||||
|
||||
- State-of-the-art serving throughput
|
||||
- Efficient management of attention key and value memory with **PagedAttention**
|
||||
- Continuous batching of incoming requests
|
||||
- Fast model execution with CUDA/HIP graph
|
||||
- Quantization: [GPTQ](https://arxiv.org/abs/2210.17323), [AWQ](https://arxiv.org/abs/2306.00978), [SqueezeLLM](https://arxiv.org/abs/2306.07629), FP8 KV Cache
|
||||
- Optimized CUDA kernels
|
||||
|
||||
**Performance benchmark**: We include a [performance benchmark](https://buildkite.com/vllm/performance-benchmark/builds/3924) that compares the performance of vLLM against other LLM serving engines ([TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM), [text-generation-inference](https://github.com/huggingface/text-generation-inference), and [lmdeploy](https://github.com/InternLM/lmdeploy)).
|
||||
|
||||
vLLM is flexible and easy to use with:
|
||||
|
||||
- Seamless integration with popular Hugging Face models
|
||||
- High-throughput serving with various decoding algorithms, including *parallel sampling*, *beam search*, and more
|
||||
- Tensor parallelism and pipeline parallelism support for distributed inference
|
||||
- Streaming outputs
|
||||
- OpenAI-compatible API server
|
||||
- Support for NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, and PowerPC CPUs
|
||||
- (Experimental) Prefix caching support
|
||||
- (Experimental) Multi-LoRA support
|
||||
|
||||
vLLM seamlessly supports most popular open-source models on HuggingFace, including:
|
||||
- Transformer-like LLMs (e.g., Llama)
|
||||
- Mixture-of-Expert LLMs (e.g., Mixtral)
|
||||
- Multi-modal LLMs (e.g., LLaVA)
|
||||
|
||||
Find the full list of supported models [here](https://docs.vllm.ai/en/latest/models/supported_models.html).
|
||||
|
||||
## Getting Started
|
||||
|
||||
Install vLLM with pip or [from source](https://vllm.readthedocs.io/en/latest/getting_started/installation.html#build-from-source):
|
||||
|
||||
```bash
|
||||
pip install psutil numpy ray torch
|
||||
pip install git+https://github.com/huggingface/transformers # Required for LLaMA.
|
||||
pip install sentencepiece # Required for LlamaTokenizer.
|
||||
pip install ninja # To parallelize the compilation of flash-attn.
|
||||
pip install flash-attn # This may take up to 10 mins.
|
||||
pip install -e .
|
||||
pip install vllm
|
||||
```
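After installation, a quick sanity check is to launch the OpenAI-compatible server; the model name and port below are illustrative.

```bash
# Start the OpenAI-compatible server
python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m --port 8000

# In another terminal, list the served models
curl http://localhost:8000/v1/models
```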
|
||||
|
||||
## Test simple server
|
||||
Visit our [documentation](https://vllm.readthedocs.io/en/latest/) to learn more.
|
||||
- [Installation](https://vllm.readthedocs.io/en/latest/getting_started/installation.html)
|
||||
- [Quickstart](https://vllm.readthedocs.io/en/latest/getting_started/quickstart.html)
|
||||
- [Supported Models](https://vllm.readthedocs.io/en/latest/models/supported_models.html)
|
||||
|
||||
```bash
|
||||
ray start --head
|
||||
python simple_server.py
|
||||
## Contributing
|
||||
|
||||
We welcome and value any contributions and collaborations.
|
||||
Please check out [CONTRIBUTING.md](./CONTRIBUTING.md) for how to get involved.
|
||||
|
||||
## Sponsors
|
||||
|
||||
vLLM is a community project. Our compute resources for development and testing are supported by the following organizations. Thank you for your support!
|
||||
|
||||
<!-- Note: Please sort them in alphabetical order. -->
|
||||
<!-- Note: Please keep these consistent with docs/source/community/sponsors.md -->
|
||||
|
||||
- a16z
|
||||
- AMD
|
||||
- Anyscale
|
||||
- AWS
|
||||
- Crusoe Cloud
|
||||
- Databricks
|
||||
- DeepInfra
|
||||
- Dropbox
|
||||
- Lambda Lab
|
||||
- NVIDIA
|
||||
- Replicate
|
||||
- Roblox
|
||||
- RunPod
|
||||
- Sequoia Capital
|
||||
- Trainy
|
||||
- UC Berkeley
|
||||
- UC San Diego
|
||||
- ZhenFund
|
||||
|
||||
We also have an official fundraising venue through [OpenCollective](https://opencollective.com/vllm). We plan to use the fund to support the development, maintenance, and adoption of vLLM.
|
||||
|
||||
## Citation
|
||||
|
||||
If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs/2309.06180):
|
||||
```bibtex
|
||||
@inproceedings{kwon2023efficient,
|
||||
title={Efficient Memory Management for Large Language Model Serving with PagedAttention},
|
||||
author={Woosuk Kwon and Zhuohan Li and Siyuan Zhuang and Ying Sheng and Lianmin Zheng and Cody Hao Yu and Joseph E. Gonzalez and Hao Zhang and Ion Stoica},
|
||||
booktitle={Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles},
|
||||
year={2023}
|
||||
}
|
||||
```
|
||||
|
||||
The detailed arguments for `simple_server.py` can be found by:
|
||||
```bash
|
||||
python simple_server.py --help
|
||||
```
|
||||
|
||||
## FastAPI server
|
||||
|
||||
Install the following additional dependencies:
|
||||
```bash
|
||||
pip install fastapi uvicorn
|
||||
```
|
||||
|
||||
To start the server:
|
||||
```bash
|
||||
ray start --head
|
||||
python -m cacheflow.http_frontend.fastapi_frontend
|
||||
```
|
||||
|
||||
To test the server:
|
||||
```bash
|
||||
python -m cacheflow.http_frontend.test_cli_client
|
||||
```
|
||||
|
||||
## Gradio web server
|
||||
|
||||
Install the following additional dependencies:
|
||||
```bash
|
||||
pip install gradio
|
||||
```
|
||||
|
||||
Start the server:
|
||||
```bash
|
||||
python -m cacheflow.http_frontend.fastapi_frontend
|
||||
# At another terminal
|
||||
python -m cacheflow.http_frontend.gradio_webserver
|
||||
```
|
||||
|
||||
## Load LLaMA weights
|
||||
|
||||
Since the LLaMA weights are not fully public, we cannot download them directly from Hugging Face. Therefore, you need to follow the process below to load the LLaMA weights.
|
||||
|
||||
1. Convert the LLaMA weights to Hugging Face format with [this script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py).
|
||||
```bash
|
||||
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
|
||||
--input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path/llama-7b
|
||||
```
|
||||
Please make sure that `llama` is included in the output directory name.
|
||||
2. For all the commands above, specify the model with `--model /output/path/llama-7b` to load the model. For example:
|
||||
```bash
|
||||
python simple_server.py --model /output/path/llama-7b
|
||||
python -m cacheflow.http_frontend.fastapi_frontend --model /output/path/llama-7b
|
||||
```
|
||||
|
@ -1,165 +0,0 @@
|
||||
import functools
|
||||
import random
|
||||
import time
|
||||
from typing import List
|
||||
|
||||
from flash_attn.flash_attn_interface import _flash_attn_forward
|
||||
import torch
|
||||
|
||||
from cacheflow import attention_ops
|
||||
|
||||
|
||||
def benchmark(name, f, num_warmup = 10, num_iters = 100):
|
||||
for _ in range(num_warmup):
|
||||
f()
|
||||
torch.cuda.synchronize()
|
||||
|
||||
start = time.time()
|
||||
for _ in range(num_iters):
|
||||
f()
|
||||
torch.cuda.synchronize()
|
||||
end = time.time()
|
||||
print(f'{name}: {(end - start) / num_iters * 1000:.3f} ms')
|
||||
|
||||
|
||||
@torch.inference_mode()
|
||||
def benchmark_multi_query_cached_kv_attention(
|
||||
query_lens: List[int],
|
||||
context_lens: List[int],
|
||||
num_heads: int,
|
||||
head_size: int,
|
||||
block_size: int,
|
||||
num_blocks: int,
|
||||
dtype: torch.dtype,
|
||||
) -> None:
|
||||
print(f'query_lens: {query_lens}, context_lens: {context_lens}, '
|
||||
f'num_heads: {num_heads}, head_size: {head_size}, block_size: '
|
||||
f'{block_size}, num_blocks: {num_blocks}, dtype: {dtype}')
|
||||
# Create query tensor.
|
||||
num_queries = len(query_lens)
|
||||
cu_query_lens = [0]
|
||||
for query_len in query_lens:
|
||||
cu_query_lens.append(cu_query_lens[-1] + query_len)
|
||||
num_total_tokens = cu_query_lens[-1]
|
||||
qkv = torch.randn(
|
||||
num_total_tokens, 3, num_heads, head_size, dtype=dtype, device='cuda')
|
||||
query, _, _ = qkv.unbind(dim=1)
|
||||
|
||||
# Create key and value cache.
|
||||
x = 16 // torch.tensor([], dtype=dtype).element_size()
|
||||
key_block_shape = (num_heads, head_size // x, block_size, x)
|
||||
key_cache = torch.randn(
|
||||
size=(num_blocks, *key_block_shape), dtype=dtype, device='cuda')
|
||||
value_block_shape = (num_heads, head_size, block_size)
|
||||
value_cache = torch.randn(
|
||||
size=(num_blocks, *value_block_shape), dtype=dtype, device='cuda')
|
||||
|
||||
# Create block tables.
|
||||
max_context_len = max(context_lens)
|
||||
max_num_blocks_per_seq = (max_context_len + block_size - 1) // block_size
|
||||
block_tables = []
|
||||
for _ in range(num_queries):
|
||||
block_table = [
|
||||
random.randint(0, num_blocks - 1)
|
||||
for _ in range(max_num_blocks_per_seq)
|
||||
]
|
||||
block_tables.append(block_table)
|
||||
block_tables = torch.tensor(block_tables, dtype=torch.int, device='cuda')
|
||||
|
||||
# Create input and output data structures.
|
||||
cu_query_lens = torch.tensor(cu_query_lens, dtype=torch.int, device='cuda')
|
||||
context_len_tensor = torch.tensor(context_lens, dtype=torch.int, device='cuda')
|
||||
scale = float(1.0 / (head_size ** 0.5))
|
||||
output = torch.empty(
|
||||
num_total_tokens, num_heads, head_size, dtype=dtype, device='cuda')
|
||||
|
||||
# Run our implementation.
|
||||
def run_ours():
|
||||
attention_ops.multi_query_cached_kv_attention(
|
||||
cu_query_lens,
|
||||
output,
|
||||
query,
|
||||
key_cache,
|
||||
value_cache,
|
||||
scale,
|
||||
block_tables,
|
||||
context_len_tensor,
|
||||
block_size,
|
||||
max_context_len,
|
||||
)
|
||||
benchmark('Ours', run_ours)
|
||||
|
||||
# Upper bound: Flash attention.
|
||||
# Because Flash attention cannot read our own cache,
|
||||
# we make key and value tensors contiguous.
|
||||
num_kv_tokens = sum(context_lens)
|
||||
cu_context_lens = [0]
|
||||
for context_len in context_lens:
|
||||
cu_context_lens.append(cu_context_lens[-1] + context_len)
|
||||
cu_context_lens = torch.tensor(cu_context_lens, dtype=torch.int, device='cuda')
|
||||
qkv = torch.randn(
|
||||
num_kv_tokens, 3, num_heads, head_size, dtype=dtype, device='cuda')
|
||||
_, key, value = qkv.unbind(dim=1)
|
||||
ref_output = torch.empty_like(output)
|
||||
|
||||
# Run Flash attention.
|
||||
def run_flash_attn():
|
||||
_flash_attn_forward(
|
||||
query,
|
||||
key,
|
||||
value,
|
||||
ref_output,
|
||||
cu_query_lens,
|
||||
cu_context_lens,
|
||||
max(query_lens),
|
||||
max_context_len,
|
||||
dropout_p=0.0,
|
||||
softmax_scale=scale,
|
||||
causal=True,
|
||||
return_softmax=False,
|
||||
)
|
||||
benchmark('Flash attention', run_flash_attn)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
BLOCK_SIZE = 8
|
||||
NUM_BLOCKS = 1024
|
||||
DTYPE = torch.half
|
||||
|
||||
# LLaMA-13B and OPT-13B
|
||||
NUM_HEADS = 40
|
||||
HEAD_SIZE = 128
|
||||
|
||||
run_benchmark = functools.partial(
|
||||
benchmark_multi_query_cached_kv_attention,
|
||||
num_heads=NUM_HEADS,
|
||||
head_size=HEAD_SIZE,
|
||||
block_size=BLOCK_SIZE,
|
||||
num_blocks=NUM_BLOCKS,
|
||||
dtype=DTYPE,
|
||||
)
|
||||
|
||||
run_benchmark(
|
||||
query_lens=[64] * 1,
|
||||
context_lens=[64] * 1,
|
||||
)
|
||||
run_benchmark(
|
||||
query_lens=[128] * 1,
|
||||
context_lens=[128] * 1,
|
||||
)
|
||||
run_benchmark(
|
||||
query_lens=[64] * 8,
|
||||
context_lens=[64] * 8,
|
||||
)
|
||||
run_benchmark(
|
||||
query_lens=[128] * 8,
|
||||
context_lens=[128] * 8,
|
||||
)
|
||||
run_benchmark(
|
||||
query_lens=[64, 32, 16],
|
||||
context_lens=[128, 256, 64],
|
||||
)
|
||||
run_benchmark(
|
||||
query_lens=[1024],
|
||||
context_lens=[1024],
|
||||
)
|
@ -1,81 +0,0 @@
|
||||
import functools
|
||||
import random
|
||||
import time
|
||||
|
||||
import torch
|
||||
|
||||
from cacheflow import cache_ops
|
||||
|
||||
|
||||
def benchmark(name, f, size: int, num_warmup = 10, num_iters = 100):
|
||||
for _ in range(num_warmup):
|
||||
f()
|
||||
torch.cuda.synchronize()
|
||||
|
||||
start = time.time()
|
||||
for _ in range(num_iters):
|
||||
f()
|
||||
torch.cuda.synchronize()
|
||||
end = time.time()
|
||||
avg_time = (end - start) / num_iters
|
||||
print(f'[Latency] {name}: {avg_time * 1000:.3f} ms')
|
||||
print(f'[Throughput] {name}: {size / avg_time / 2 ** 30:.3f} GB/s')
|
||||
|
||||
|
||||
@torch.inference_mode()
|
||||
def test_gather_cached_kv(
|
||||
num_tokens: int,
|
||||
num_heads: int,
|
||||
head_size: int,
|
||||
block_size: int,
|
||||
num_blocks: int,
|
||||
dtype: torch.dtype,
|
||||
) -> None:
|
||||
print(f'num_tokens: {num_tokens}, num_heads: {num_heads}, '
|
||||
f'head_size: {head_size}, block_size: {block_size}, '
|
||||
f'num_blocks: {num_blocks}, dtype: {dtype}')
|
||||
|
||||
num_slots = block_size * num_blocks
|
||||
slot_mapping = random.sample(range(num_slots), num_tokens)
|
||||
slot_mapping = torch.tensor(slot_mapping, dtype=torch.int, device='cuda')
|
||||
|
||||
qkv = torch.randn(
|
||||
num_tokens, 3, num_heads, head_size, dtype=dtype, device='cuda')
|
||||
_, key, value = qkv.unbind(dim=1)
|
||||
|
||||
x = 16 // torch.tensor([], dtype=dtype).element_size()
|
||||
key_cache_shape = (num_blocks, num_heads, head_size // x, block_size, x)
|
||||
key_cache = torch.randn(size=key_cache_shape, dtype=dtype, device='cuda')
|
||||
|
||||
value_cache_shape = (num_blocks, num_heads, head_size, block_size)
|
||||
value_cache = torch.randn(
|
||||
size=value_cache_shape, dtype=dtype, device='cuda')
|
||||
|
||||
# Run Flash attention.
|
||||
def run():
|
||||
cache_ops.gather_cached_kv(key, value, key_cache, value_cache, slot_mapping)
|
||||
|
||||
benchmark('gather_cached_kv', run,
|
||||
size=num_tokens * num_heads * head_size * 2 * qkv.element_size())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
BLOCK_SIZE = 8
|
||||
NUM_BLOCKS = 1024
|
||||
DTYPE = torch.half
|
||||
|
||||
# LLaMA-13B and OPT-13B
|
||||
NUM_HEADS = 40
|
||||
HEAD_SIZE = 128
|
||||
|
||||
run_benchmark = functools.partial(
|
||||
test_gather_cached_kv,
|
||||
num_heads=NUM_HEADS,
|
||||
head_size=HEAD_SIZE,
|
||||
block_size=BLOCK_SIZE,
|
||||
num_blocks=NUM_BLOCKS,
|
||||
dtype=DTYPE,
|
||||
)
|
||||
|
||||
for i in range(6, 12):
|
||||
run_benchmark(num_tokens=2 ** i)
|
@ -1,105 +0,0 @@
|
||||
import argparse
|
||||
import time
|
||||
from typing import List
|
||||
|
||||
from tqdm import tqdm
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from cacheflow.master.simple_frontend import SimpleFrontend
|
||||
from cacheflow.master.server import (Server, add_server_arguments,
|
||||
initialize_ray_cluster)
|
||||
from cacheflow.sampling_params import SamplingParams
|
||||
from cacheflow.utils import get_gpu_memory, get_cpu_memory
|
||||
|
||||
|
||||
def main(args: argparse.Namespace):
|
||||
# TODO(zhuohan): Support pipeline parallelism.
|
||||
assert args.pipeline_parallel_size == 1, (
|
||||
'Pipeline parallelism is not supported yet.')
|
||||
|
||||
(num_nodes, num_devices_per_node, distributed_init_method,
|
||||
all_stage_devices) = (
|
||||
initialize_ray_cluster(
|
||||
address='local',
|
||||
pipeline_parallel_size=args.pipeline_parallel_size,
|
||||
tensor_parallel_size=args.tensor_parallel_size))
|
||||
|
||||
# Create a server.
|
||||
server = Server(
|
||||
model=args.model,
|
||||
model_path=args.model_path,
|
||||
use_dummy_weights=args.use_dummy_weights,
|
||||
pipeline_parallel_size=args.pipeline_parallel_size,
|
||||
tensor_parallel_size=args.tensor_parallel_size,
|
||||
block_size=args.block_size,
|
||||
dtype=args.dtype,
|
||||
seed=args.seed,
|
||||
swap_space=args.swap_space,
|
||||
max_num_batched_tokens=args.max_num_batched_tokens,
|
||||
max_num_sequences=args.max_num_sequences,
|
||||
num_nodes=num_nodes,
|
||||
num_devices_per_node=num_devices_per_node,
|
||||
distributed_init_method=distributed_init_method,
|
||||
all_stage_devices=all_stage_devices,
|
||||
gpu_memory=get_gpu_memory(),
|
||||
cpu_memory=get_cpu_memory(),
|
||||
)
|
||||
|
||||
# Create a frontend.
|
||||
frontend = SimpleFrontend(
|
||||
model_name=args.model,
|
||||
block_size=args.block_size,
|
||||
)
|
||||
sampling_params_dict = {
|
||||
'n': args.n,
|
||||
'temperature': 0.0 if args.use_beam_search else 1.0,
|
||||
'top_p': 1.0,
|
||||
'use_beam_search': args.use_beam_search,
|
||||
'stop_token_ids': set(),
|
||||
'max_num_steps': args.output_len,
|
||||
}
|
||||
sampling_params = SamplingParams.from_dict(sampling_params_dict)
|
||||
print(sampling_params)
|
||||
input_token_ids = [0] * args.input_len
|
||||
|
||||
def profile_step(profile=False):
|
||||
if profile:
|
||||
torch.cuda.cudart().cudaProfilerStart()
|
||||
for _ in range(args.batch_size):
|
||||
frontend._add_query(input_token_ids, sampling_params)
|
||||
server.add_sequence_groups(frontend.get_inputs())
|
||||
start_time = time.time()
|
||||
while True:
|
||||
server.step()
|
||||
if not server.has_unfinished_requests():
|
||||
break
|
||||
end_time = time.time()
|
||||
latency = end_time - start_time
|
||||
if profile:
|
||||
torch.cuda.cudart().cudaProfilerStop()
|
||||
return latency
|
||||
|
||||
print("Warm up step")
|
||||
profile_step()
|
||||
|
||||
# Benchmark.
|
||||
latencies = []
|
||||
for _ in tqdm(range(3), desc="Profile step"):
|
||||
latencies.append(profile_step())
|
||||
print(f'Avg latency: {np.mean(latencies)} seconds')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser(description='CacheFlow simple server.')
|
||||
parser = add_server_arguments(parser)
|
||||
parser.add_argument('--input-len', type=int, default=32)
|
||||
parser.add_argument('--output-len', type=int, default=128)
|
||||
parser.add_argument('--batch-size', type=int, default=8)
|
||||
parser.add_argument('--n', type=int, default=1)
|
||||
parser.add_argument('--use-beam-search', action='store_true')
|
||||
args = parser.parse_args()
|
||||
args.max_num_batched_tokens = max(
|
||||
args.max_num_batched_tokens, args.batch_size * args.input_len)
|
||||
print(args)
|
||||
main(args)
|
@ -1,290 +0,0 @@
import argparse
import logging
import os
import pickle
import time
from typing import List

from tqdm import tqdm
from transformers import AutoConfig

from benchmark.trace import generate_text_completion_requests
from cacheflow.master.simple_frontend import SimpleFrontend
from cacheflow.master.server import (Server, add_server_arguments,
                                     initialize_ray_cluster)
from cacheflow.sampling_params import SamplingParams
from cacheflow.utils import get_gpu_memory, get_cpu_memory


logger = logging.getLogger(__name__)


def main(args: argparse.Namespace):
    assert args.pipeline_parallel_size == 1, (
        'Pipeline parallelism is not supported yet.')

    (num_nodes, num_devices_per_node, distributed_init_method,
     all_stage_devices) = (
        initialize_ray_cluster(
            address='local',
            pipeline_parallel_size=args.pipeline_parallel_size,
            tensor_parallel_size=args.tensor_parallel_size))

    # Create a server.
    server = Server(
        model=args.model,
        model_path=args.model_path,
        use_dummy_weights=args.use_dummy_weights,
        pipeline_parallel_size=args.pipeline_parallel_size,
        tensor_parallel_size=args.tensor_parallel_size,
        block_size=args.block_size,
        dtype=args.dtype,
        seed=args.seed,
        swap_space=args.swap_space,
        max_num_batched_tokens=args.max_num_batched_tokens,
        max_num_sequences=args.max_num_sequences,
        num_nodes=num_nodes,
        num_devices_per_node=num_devices_per_node,
        distributed_init_method=distributed_init_method,
        all_stage_devices=all_stage_devices,
        gpu_memory=get_gpu_memory(),
        cpu_memory=get_cpu_memory(),
        collect_stats=True,
        do_memory_analysis=args.do_memory_analysis,
    )

    # Create a frontend.
    frontend = SimpleFrontend(
        model_name=args.model,
        block_size=args.block_size,
    )
    # Generate requests.
    requests = generate_text_completion_requests(
        args.dataset,
        args.request_rate,
        args.duration,
        args.seed,
        args.n1,
        args.n2,
        args.n3,
        args.n4,
        args.n6,
        args.n2_beam,
        args.n4_beam,
        args.n6_beam,
        args.n8_beam,
    )

    # Warm up.
    logger.info('Warming up.')
    num_warmup_requests = 8
    warmup_input_len = 8
    warmup_output_len = 32
    warmup_sampling_params = SamplingParams(
        n=1,
        temperature=1.0,
        top_p=0.99,
        max_num_steps=warmup_output_len,
        use_beam_search=False,
        stop_token_ids=set(),
        num_logprobs=0,
        context_window_size=None,
    )
    for _ in range(num_warmup_requests):
        frontend._add_query([0] * warmup_input_len, warmup_sampling_params)
    server.add_sequence_groups(frontend.get_inputs())
    while True:
        server.step()
        if not server.has_unfinished_requests():
            break

    # Start benchmarking.
    logger.info('Start benchmarking.')
    # Initialize tqdm.
    pbar = tqdm(total=len(requests), desc='Finished requests')

    finished = []
    server.scheduler.reset_stats()
    start_time = time.time()
    while True:
        now = time.time()
        if args.timeout is not None and now - start_time > args.timeout:
            logger.info('Timeout. Stop benchmarking.')
            break

        while requests:
            if requests[0][0] <= now - start_time:
                request_time, input_tokens, sampling_params = requests.pop(0)
                frontend._add_query(
                    input_tokens, sampling_params,
                    arrival_time=start_time + request_time)
            else:
                break
        server.add_sequence_groups(frontend.get_inputs())
        updated_seq_groups = server.step()

        now = time.time()
        for seq_group in updated_seq_groups:
            if not seq_group.is_finished():
                continue
            arrival_time = seq_group.arrival_time
            finish_time = now
            for seq in seq_group.get_seqs():
                seq_len = seq.get_len()
                output_len = seq_len - seq.prompt_len
                finished.append({
                    'group_id': seq_group.group_id,
                    'seq_id': seq.seq_id,
                    'arrival_time': arrival_time,
                    'finish_time': finish_time,
                    'prompt_len': seq.prompt_len,
                    'output_len': output_len,
                })
            pbar.update(1)

        if not (requests or server.has_unfinished_requests()):
            break
    pbar.close()
    logger.info('Finish benchmarking. Saving stats.')
    server.scheduler.save_stats(args.output_dir)
    with open(os.path.join(args.output_dir, 'sequences.pkl'), 'wb') as f:
        pickle.dump(finished, f)
    logger.info('Done.')


def get_model_name(model: str) -> str:
    OPT_MODELS = [
        'opt-125m',
        'opt-350m',
        'opt-1.3b',
        'opt-2.7b',
        'opt-6.7b',
        'opt-13b',
        'opt-30b',
        'opt-66b',
        'opt-175b',
    ]
    for opt_model in OPT_MODELS:
        if opt_model in model:
            return opt_model

    config = AutoConfig.from_pretrained(model)
    assert config.model_type == 'llama'
    hidden_size = config.hidden_size
    if hidden_size == 4096:
        return 'llama-7b'
    elif hidden_size == 5120:
        return 'llama-13b'
    elif hidden_size == 6656:
        return 'llama-30b'
    elif hidden_size == 8192:
        return 'llama-65b'
    else:
        raise ValueError(f'Unknown model: {model}')


def get_dataset_name(dataset: str) -> str:
    if 'sharegpt' in dataset.lower():
        return 'sharegpt'
    elif 'alpaca' in dataset.lower():
        return 'alpaca'
    else:
        raise ValueError(f'Unknown dataset: {dataset}')


def get_sampling_dir_name(
    n1: float,
    n2: float,
    n3: float,
    n4: float,
    n6: float,
    n2_beam: float,
    n4_beam: float,
    n6_beam: float,
    n8_beam: float,
) -> str:
    method = ''
    if n1 > 0.0:
        method = 'n1' if n1 == 1.0 else method + f'n1-{n1}-'
    if n2 > 0.0:
        method = 'n2' if n2 == 1.0 else method + f'n2-{n2}-'
    if n3 > 0.0:
        method = 'n3' if n3 == 1.0 else method + f'n3-{n3}-'
    if n4 > 0.0:
        method = 'n4' if n4 == 1.0 else method + f'n4-{n4}-'
    if n6 > 0.0:
        method = 'n6' if n6 == 1.0 else method + f'n6-{n6}-'
    if n2_beam > 0.0:
        method = 'n2-beam' if n2_beam == 1.0 else method + f'n2-beam-{n2_beam}-'
    if n4_beam > 0.0:
        method = 'n4-beam' if n4_beam == 1.0 else method + f'n4-beam-{n4_beam}-'
    if n6_beam > 0.0:
        method = 'n6-beam' if n6_beam == 1.0 else method + f'n6-beam-{n6_beam}-'
    if n8_beam > 0.0:
        method = 'n8-beam' if n8_beam == 1.0 else method + f'n8-beam-{n8_beam}-'
    return method[:-1] if method.endswith('-') else method


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CacheFlow simple server.')
    parser = add_server_arguments(parser)
    parser.add_argument('--output-dir', type=str, help='path to output directory', default=None)

    parser.add_argument('--dataset', type=str, help='path to dataset', required=True)
    parser.add_argument('--request-rate', type=float, help='reqs/sec', required=True)
    parser.add_argument('--duration', type=int, help='duration in seconds', required=True)
    parser.add_argument('--do-memory-analysis', action='store_true',
                        help='do memory analysis (This will lower the throughput. Use this only for analysis.)')
    parser.add_argument('--timeout', type=int, help='time out in seconds', default=None)

    parser.add_argument('--n1', type=float, help='ratio of requests with n=1', default=0.0)
    parser.add_argument('--n2', type=float, help='ratio of requests with n=2', default=0.0)
    parser.add_argument('--n3', type=float, help='ratio of requests with n=3', default=0.0)
    parser.add_argument('--n4', type=float, help='ratio of requests with n=4', default=0.0)
    parser.add_argument('--n6', type=float, help='ratio of requests with n=6', default=0.0)
    parser.add_argument('--n2-beam', type=float, help='ratio of requests with n=2 & beam search', default=0.0)
    parser.add_argument('--n4-beam', type=float, help='ratio of requests with n=4 & beam search', default=0.0)
    parser.add_argument('--n6-beam', type=float, help='ratio of requests with n=6 & beam search', default=0.0)
    parser.add_argument('--n8-beam', type=float, help='ratio of requests with n=8 & beam search', default=0.0)
    args = parser.parse_args()
    if args.n1 + args.n2 + args.n3 + args.n4 + args.n6 + args.n2_beam + args.n4_beam + args.n6_beam + args.n8_beam != 1.0:
        raise ValueError('The ratios of requests must sum to 1.')

    model_name = get_model_name(args.model)
    dataset_name = get_dataset_name(args.dataset)
    if 'opt' in model_name:
        if 'opt' not in args.dataset.lower():
            raise ValueError('OPT models can only be used with OPT datasets.')
    elif 'llama' in model_name:
        if 'llama' not in args.dataset.lower():
            raise ValueError('Llama models can only be used with Llama datasets.')

    dataset_name = 'sharegpt' if 'sharegpt' in args.dataset else 'alpaca'
    sample_dir = get_sampling_dir_name(
        args.n1, args.n2, args.n3, args.n4, args.n6, args.n2_beam,
        args.n4_beam, args.n6_beam, args.n8_beam)
    if args.output_dir is None:
        args.output_dir = os.path.join(
            '../exp',
            dataset_name,
            f'{model_name}-tp{args.tensor_parallel_size}',
            sample_dir,
            'cacheflow',
            f'block{args.block_size}',
            f'req-rate-{args.request_rate}',
            f'seed{args.seed}',
            f'duration-{args.duration}',
        )
    os.makedirs(args.output_dir, exist_ok=True)

    # Set up logging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
        handlers=[
            logging.StreamHandler(),
            logging.FileHandler(os.path.join(args.output_dir, 'log.txt')),
        ],
    )
    logger.info(args)

    main(args)
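The benchmark above pickles one record per finished sequence into `sequences.pkl`. A minimal sketch of how those records could be post-processed into per-token normalized latencies, assuming the record format written by the loop above (`load_normalized_latencies` is a hypothetical helper, not part of the original code):

import pickle

def load_normalized_latencies(path):
    # Each record carries arrival_time, finish_time, prompt_len, and
    # output_len, as written by the benchmarking loop above.
    with open(path, 'rb') as f:
        finished = pickle.load(f)
    results = []
    for record in finished:
        latency = record['finish_time'] - record['arrival_time']
        # Normalize by output length so long generations do not dominate.
        results.append(latency / max(record['output_len'], 1))
    return results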
@@ -1,116 +0,0 @@
import pickle
import random
from typing import List, Tuple

import numpy as np

from cacheflow.sampling_params import SamplingParams


def generate_text_completion_requests(
    dataset: str,
    request_rate: float,
    duration: int,
    seed: int,
    n1: float = 0.0,
    n2: float = 0.0,
    n3: float = 0.0,
    n4: float = 0.0,
    n6: float = 0.0,
    n2_beam: float = 0.0,
    n4_beam: float = 0.0,
    n6_beam: float = 0.0,
    n8_beam: float = 0.0,
    max_seq_len: int = 2048,
    time_quantum: int = 10,
) -> List[Tuple[float, List[int], SamplingParams]]:
    random.seed(seed)
    np.random.seed(seed)

    # Generate timestamps for requests using Poisson distribution.
    lam = request_rate * (time_quantum / 1000)
    quantums_per_sec = 1000 / time_quantum
    arrival_times = np.random.poisson(
        lam=lam, size=int(duration * quantums_per_sec))
    timestamps = []
    for i, n in enumerate(arrival_times):
        timestamps += [i * (time_quantum / 1000)] * n

    # Load and shuffle the dataset.
    num_requests = len(timestamps)
    with open(dataset, 'rb') as f:
        data = pickle.load(f)

    filtered = []
    for pair in data:
        input_tokens, output_tokens = pair
        input_len = len(input_tokens)
        output_len = len(output_tokens)
        # Filter out too long sequences.
        if input_len + output_len < max_seq_len:
            # Output tokens are not needed for the benchmark.
            filtered.append((input_tokens, output_len))

    data = []
    while len(data) < num_requests:
        data += filtered
    data = data[:num_requests]
    # Shuffle the data.
    assert len(data) == len(timestamps)
    random.shuffle(data)

    random_sampling_params_dict = {
        'temperature': 1.0,
        'top_p': 1.0,
        'use_beam_search': False,
        'stop_token_ids': set(),
        'num_logprobs': 0,
        'context_window_size': None,
    }
    beam_search_params_dict = {
        'temperature': 0.0,
        'top_p': 1.0,
        'use_beam_search': True,
        'stop_token_ids': set(),
        'num_logprobs': 0,
        'context_window_size': None,
    }

    # Generate requests based on the sampling parameter ratio.
    requests = []
    assert n1 + n2 + n3 + n4 + n6 + n2_beam + n4_beam + n6_beam + n8_beam == 1.0
    cum_sum = 0
    for timestamp, pair in zip(timestamps, data):
        input_tokens, output_len = pair
        if cum_sum < n1 * num_requests:
            sampling_params = SamplingParams(
                n=1, max_num_steps=output_len, **random_sampling_params_dict)
        elif cum_sum < (n1 + n2) * num_requests:
            sampling_params = SamplingParams(
                n=2, max_num_steps=output_len, **random_sampling_params_dict)
        elif cum_sum < (n1 + n2 + n3) * num_requests:
            sampling_params = SamplingParams(
                n=3, max_num_steps=output_len, **random_sampling_params_dict)
        elif cum_sum < (n1 + n2 + n3 + n4) * num_requests:
            sampling_params = SamplingParams(
                n=4, max_num_steps=output_len, **random_sampling_params_dict)
        elif cum_sum < (n1 + n2 + n3 + n4 + n6) * num_requests:
            sampling_params = SamplingParams(
                n=6, max_num_steps=output_len, **random_sampling_params_dict)
        elif cum_sum < (n1 + n2 + n3 + n4 + n6 + n2_beam) * num_requests:
            sampling_params = SamplingParams(
                n=2, max_num_steps=output_len, **beam_search_params_dict)
        elif cum_sum < (n1 + n2 + n3 + n4 + n6 + n2_beam + n4_beam) * num_requests:
            sampling_params = SamplingParams(
                n=4, max_num_steps=output_len, **beam_search_params_dict)
        elif cum_sum < (n1 + n2 + n3 + n4 + n6 + n2_beam + n4_beam + n6_beam) * num_requests:
            sampling_params = SamplingParams(
                n=6, max_num_steps=output_len, **beam_search_params_dict)
        elif cum_sum < (n1 + n2 + n3 + n4 + n6 + n2_beam + n4_beam + n6_beam + n8_beam) * num_requests:
            sampling_params = SamplingParams(
                n=8, max_num_steps=output_len, **beam_search_params_dict)
        else:
            raise ValueError('Invalid request ratio.')
        cum_sum += 1
        requests.append((timestamp, input_tokens, sampling_params))
    return requests
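A hypothetical invocation of the generator above, replaying a ShareGPT-style trace at 2 requests/s for 60 seconds with every request using n=1 random sampling (the dataset path is an assumption for illustration, not a file shipped with the code):

requests = generate_text_completion_requests(
    dataset='sharegpt_tokens.pkl',  # assumed: pickled list of (input_tokens, output_tokens) pairs
    request_rate=2.0,
    duration=60,
    seed=0,
    n1=1.0,
)
for arrival_s, input_tokens, sampling_params in requests[:3]:
    print(f'{arrival_s:.2f}s: {len(input_tokens)} prompt tokens, {sampling_params}')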
Some files were not shown because too many files have changed in this diff.