# See https://help.github.com/articles/about-codeowners/
# for more info about CODEOWNERS file
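#
# As described in the docs above, each line below pairs a gitignore-style path
# pattern with one or more owners; when several patterns match a file, the last
# matching entry takes precedence (e.g. the /vllm/v1/structured_output entry
# overrides the broader /vllm/v1 entry for files under that subdirectory).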

# This list covers the "core" components of vLLM that require careful review
/vllm/attention/backends/abstract.py @WoosukKwon @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
/vllm/core @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
/vllm/engine/llm_engine.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
/vllm/executor/executor_base.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
/vllm/worker/worker_base.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
/vllm/worker/worker.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
/vllm/model_executor/layers/sampler.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
/vllm/model_executor/layers/quantization @mgoin @robertgshaw2-redhat @tlrmchlsmth @yewentao256
/vllm/model_executor/layers/mamba @tdoublep
/vllm/multimodal @DarkLight1337 @ywang96
/vllm/vllm_flash_attn @LucasWilkinson
/vllm/lora @jeejeelee
/vllm/reasoning @aarnphm
/vllm/entrypoints @aarnphm
/vllm/compilation @zou3519 @youkaichao @ProExpertProg
CMakeLists.txt @tlrmchlsmth @LucasWilkinson

# Any change to VllmConfig can have a large user-facing impact,
# so spam a lot of people
/vllm/config @simon-mo @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor @yewentao256 @ProExpertProg

# vLLM V1
/vllm/v1 @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @comaniac @alexm-redhat
/vllm/v1/structured_output @mgoin @russellb @aarnphm
/vllm/v1/attention/backends/triton_attn.py @tdoublep

# Test ownership
/.buildkite/lm-eval-harness @mgoin @simon-mo
/tests/async_engine @njhill @robertgshaw2-redhat @simon-mo
/tests/distributed/test_multi_node_assignment.py @youkaichao
/tests/distributed/test_pipeline_parallel.py @youkaichao
/tests/distributed/test_same_node.py @youkaichao
/tests/entrypoints @DarkLight1337 @robertgshaw2-redhat @simon-mo @aarnphm
/tests/kernels @tlrmchlsmth @WoosukKwon @yewentao256
/tests/models @DarkLight1337 @ywang96
/tests/multimodal @DarkLight1337 @ywang96
/tests/prefix_caching @comaniac @KuntaiDu
/tests/quantization @mgoin @robertgshaw2-redhat @yewentao256
/tests/test_inputs.py @DarkLight1337 @ywang96
/tests/v1/entrypoints/llm/test_struct_output_generate.py @mgoin @russellb @aarnphm
/tests/v1/structured_output @mgoin @russellb @aarnphm
/tests/weight_loading @mgoin @youkaichao @yewentao256
/tests/lora @jeejeelee
/tests/models/language/generation/test_hybrid.py @tdoublep

# Docs
/docs @hmellor
mkdocs.yaml @hmellor

# CPU
/vllm/v1/worker/cpu* @bigPYJ1151
/csrc/cpu @bigPYJ1151
/vllm/platforms/cpu.py @bigPYJ1151
/cmake/cpu_extension.cmake @bigPYJ1151
/docker/Dockerfile.cpu @bigPYJ1151

# Intel GPU
/vllm/v1/worker/xpu* @jikunshang
/vllm/platforms/xpu.py @jikunshang
/docker/Dockerfile.xpu @jikunshang

# Qwen-specific files
/vllm/attention/backends/dual_chunk_flash_attn.py @sighingnow
/vllm/model_executor/models/qwen* @sighingnow

# Mistral-specific files
/vllm/model_executor/models/mistral*.py @patrickvonplaten
/vllm/model_executor/models/mixtral*.py @patrickvonplaten
/vllm/model_executor/models/voxtral*.py @patrickvonplaten
/vllm/model_executor/models/pixtral*.py @patrickvonplaten
/vllm/transformers_utils/configs/mistral.py @patrickvonplaten
/vllm/transformers_utils/tokenizers/mistral.py @patrickvonplaten

# Kernels
/vllm/attention/ops/chunked_prefill_paged_decode.py @tdoublep
/vllm/attention/ops/triton_unified_attention.py @tdoublep

# ROCm related: specify owner with write access to notify AMD folks for careful code review
/docker/Dockerfile.rocm* @gshtras
/vllm/v1/attention/backends/rocm*.py @gshtras
/vllm/v1/attention/backends/mla/rocm*.py @gshtras
/vllm/attention/ops/rocm*.py @gshtras
/vllm/model_executor/layers/fused_moe/rocm*.py @gshtras