Mirror of https://github.com/vllm-project/vllm.git, synced 2025-10-20 14:53:52 +08:00.
[misc][frontend] log all available endpoints (#6195)
Co-authored-by: Cody Yu <hao.yu.cody@gmail.com>
This commit is contained in:
@@ -16,10 +16,13 @@ from fastapi.responses import JSONResponse, Response, StreamingResponse
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.logger import init_logger
from vllm.sampling_params import SamplingParams
from vllm.usage.usage_lib import UsageContext
from vllm.utils import FlexibleArgumentParser, random_uuid

logger = init_logger("vllm.entrypoints.api_server")

TIMEOUT_KEEP_ALIVE = 5  # seconds.
app = FastAPI()
engine = None
@@ -107,6 +110,14 @@ if __name__ == "__main__":
        engine_args, usage_context=UsageContext.API_SERVER)

    app.root_path = args.root_path

    logger.info("Available routes are:")
    for route in app.routes:
        if not hasattr(route, 'methods'):
            continue
        methods = ', '.join(route.methods)
        logger.info("Route: %s, Methods: %s", route.path, methods)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
|
@@ -240,6 +240,14 @@ if __name__ == "__main__":
    openai_serving_embedding = OpenAIServingEmbedding(engine, model_config,
                                                      served_model_names)
    app.root_path = args.root_path

    logger.info("Available routes are:")
    for route in app.routes:
        if not hasattr(route, 'methods'):
            continue
        methods = ', '.join(route.methods)
        logger.info("Route: %s, Methods: %s", route.path, methods)

    uvicorn.run(app,
                host=args.host,
                port=args.port,
Reference in New Issue
Block a user