import asyncio
from contextlib import asynccontextmanager

from logger import init_logger
from args import ServerArgs


# Module-level logger for this API-server module; named after the
# OpenAI-compatible endpoint it serves.
logger = init_logger('openai.api_server')

async def run_server(args, **uvicorn_kwargs) -> None:
    """Start the OpenAI-compatible API server and block until it shuts down.

    Args:
        args: Parsed server arguments; ``host``, ``port``, ``log_level``
            and ``timeout_keep_alive`` are read from it here.
        **uvicorn_kwargs: Extra keyword arguments forwarded verbatim to
            ``serve_http`` (and ultimately to uvicorn).
    """
    # NOTE(review): VLLM_VERSION is not imported in the visible portion of
    # this file — confirm it is brought into scope elsewhere.
    logger.info("vLLM API server version %s", VLLM_VERSION)
    logger.info("args: %s", args)

    async with build_async_engine_client(args) as async_engine_client:
        app = await init_app(async_engine_client, args)

        # Bug fix: the original read host/port/log_level/timeout_keep_alive
        # from the ServerArgs *class* rather than the parsed `args` instance,
        # so the caller's settings were silently ignored. It also dropped
        # **uvicorn_kwargs instead of forwarding them.
        shutdown_task = await serve_http(
            app,
            host=args.host,
            port=args.port,
            log_level=args.log_level,
            timeout_keep_alive=args.timeout_keep_alive,
            **uvicorn_kwargs,
        )

    # NB: Await server shutdown only after the backend context is exited
    await shutdown_task


def main():
    """Synchronous CLI entry point: build server args and run the server."""
    # Bug fix: run_server() requires a positional `args` argument; the
    # original called it with none, raising TypeError at startup.
    # NOTE(review): assumes ServerArgs() yields usable defaults (or parses
    # the CLI itself) — confirm against the `args` module.
    asyncio.run(run_server(ServerArgs()))
