from contextlib import asynccontextmanager

from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware

from api import ai, chat, sys


async def startup():
    """Application startup hook.

    Currently a no-op placeholder; intended as the single place to load
    ML models or open shared resources before the app serves requests.
    """
    return None


async def shutdown():
    """Application shutdown hook.

    Currently a no-op placeholder; intended as the single place to release
    models, connections, or other resources acquired in startup().
    """
    return None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan context: run startup() before the app starts
    serving requests, and shutdown() after it stops.

    NOTE(review): shutdown() is NOT wrapped in try/finally, so it is
    skipped if an exception propagates through the running app phase —
    acceptable while both hooks are no-ops.
    """
    # Load the ML model
    await startup()
    yield
    # Clean up the ML models and release the resources
    await shutdown()


# Application instance, wired to the lifespan handler defined above.
app = FastAPI(lifespan=lifespan)

# CORS middleware (optional): let the local Vite dev server call this API
# with credentials.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount the feature routers.
for _router in (ai.router, chat.router, sys.router):
    app.include_router(_router)

if __name__ == '__main__':
    # Dev entry point: serve directly with uvicorn when run as a script.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=10001)
