vllm.entrypoints.openai.models.api_router

logger module-attribute

logger = init_logger(__name__)

router module-attribute

router = APIRouter()

attach_router

attach_router(app: FastAPI)
Source code in vllm/entrypoints/openai/models/api_router.py
def attach_router(app: FastAPI):
    app.include_router(router)
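
A minimal sketch of how this hook could be exercised; the standalone FastAPI app below is an assumption for illustration, since vLLM assembles its own application when the OpenAI-compatible server starts.

from fastapi import FastAPI

from vllm.entrypoints.openai.models.api_router import attach_router

# Hypothetical app for illustration only; the real server builds its own.
app = FastAPI()

# Mount the /v1/models routes declared on this module's router.
attach_router(app)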

models

models(request: Request) -> OpenAIServingModels
Source code in vllm/entrypoints/openai/models/api_router.py
def models(request: Request) -> OpenAIServingModels:
    return request.app.state.openai_serving_models
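
The helper resolves the OpenAIServingModels handler from application state, so the server is expected to populate app.state.openai_serving_models during startup. A rough sketch of that contract, using a placeholder object because the real handler's construction depends on the serving configuration:

from fastapi import FastAPI

app = FastAPI()

# Placeholder standing in for an OpenAIServingModels instance;
# the real server builds this from its model configuration at startup.
class _FakeServingModels:
    async def show_available_models(self):
        ...

app.state.openai_serving_models = _FakeServingModels()

# Inside a request handler, models(request) returns this same object.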

show_available_models async

show_available_models(raw_request: Request)
Source code in vllm/entrypoints/openai/models/api_router.py
@router.get("/v1/models")
async def show_available_models(raw_request: Request):
    handler = models(raw_request)

    models_ = await handler.show_available_models()
    return JSONResponse(content=models_.model_dump())
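
Once a server is running, the route can be exercised with any HTTP client; the base URL below assumes a locally started vLLM OpenAI-compatible server, and the response follows the OpenAI models-list shape.

import requests

# Assumes a server listening on localhost:8000.
resp = requests.get("http://localhost:8000/v1/models")
resp.raise_for_status()

# Response shape: {"object": "list", "data": [...]} per the OpenAI API.
for model in resp.json()["data"]:
    print(model["id"])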