# Copyright (c) 2024-present AI-Labs

import uvicorn
import torch
import typer

from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles

from configs import config

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook.

    Startup performs no work; on shutdown, CUDA allocator caches are
    released so the GPU is left clean when the server exits.
    """
    # --- startup: nothing to initialize ---
    yield
    # --- shutdown: free cached CUDA memory, if a CUDA device is present ---
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

# Typer CLI entry point used to choose which service(s) to launch.
shell_app = typer.Typer()

# Shared FastAPI application; the lifespan hook frees GPU memory on shutdown.
app = FastAPI(lifespan=lifespan)

# Allow cross-origin requests from any origin.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers under the CORS spec (credentialed responses may not
# use a wildcard origin) — confirm whether credentials are actually needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve static assets from the local "statics" directory under /statics.
app.mount("/statics", StaticFiles(directory="statics"), name="statics")

@shell_app.command()
def chatvllm():
    """Start the LLM chat service.

    Registers the vLLM chat router on the shared FastAPI app, then serves
    it with a single uvicorn worker on the configured host and port.
    """
    # Imported lazily so other commands don't pay this module's load cost.
    from modules.chat import chatvllm_service

    app.include_router(chatvllm_service.router)
    uvicorn.run(app, host=config.server.host, port=config.server.port, workers=1)

@shell_app.command()
def funasr():
    """Start the automatic speech recognition (ASR) service."""
    # Lazy import keeps the heavy ASR dependencies out of the other commands.
    # NOTE(review): this command loads funasr_service_npu while auto() loads
    # funasr_service — confirm the NPU variant is intended here.
    from modules.audio import funasr_service_npu

    app.include_router(funasr_service_npu.router)
    uvicorn.run(app, host=config.server.host, port=config.server.port, workers=1)

@shell_app.command()
def auto():
    """Start services automatically based on the config file.

    Each service whose `enable` flag is set in the config gets its router
    registered on the shared app before uvicorn starts.
    """
    # Truthiness tests instead of `== True` (PEP 8 forbids comparing
    # booleans with ==); also accepts truthy values such as 1.
    if config.service.chatvllm.enable:
        from modules.chat import chatvllm_service
        app.include_router(chatvllm_service.router)

    if config.service.funasr.enable:
        # NOTE(review): auto() loads funasr_service while the funasr command
        # loads funasr_service_npu — confirm which module is intended.
        from modules.audio import funasr_service
        app.include_router(funasr_service.router)

    uvicorn.run(app, host=config.server.host, port=config.server.port, workers=1)

# Script entry point: hand control to the Typer CLI defined above.
if __name__ == '__main__':
    shell_app()
