from fastapi import FastAPI
from contextlib import asynccontextmanager
import uvicorn
from config import APP_CONFIG
from models.dependencies import get_dependencies, model_manager  

from routes.local import models
# from routes.local.no_stream import  whisperx
from routes.local.no_stream import whisper, ollama, chattts
from routes.local.streaming import ollama_stream, whisper_stream, chattts_stream

from utils.logger import setup_logger
from fastapi import Depends
from routes.local.streaming.stream import router as stream_router
from app.app_main import init_app_state, get_app_state

# Lifespan event handler: code before `yield` runs at startup, code after
# `yield` runs at shutdown. Defined before the app so it can be passed to
# the FastAPI constructor below.
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Run startup/shutdown hooks around the application's serving period."""
    logger.info("Starting up Multi-Modal AI Services...")
    # Fallback: make sure app.state exists even if init_app_state was skipped.
    if not hasattr(app, 'state') or app.state is None:
        app.state = await get_app_state()
        logger.info("应用状态已在lifespan中初始化")
    yield
    logger.info("Shutting down Multi-Modal AI Services...")
    # Resource cleanup is handled by shutdown_event in app_main.py.


app = FastAPI(
    title="Multi-Modal AI Services",
    description="API service for WhisperX, Ollama Vision, and ChatTTS",
    # BUG FIX: the lifespan context manager must be passed to the constructor.
    # The original `app.lifespan = lifespan` attribute assignment after
    # construction is silently ignored by Starlette, so the startup/shutdown
    # handler never executed.
    lifespan=lifespan,
)

logger = setup_logger()

# Initialize application state (populates app.state before the server starts).
init_app_state(app)

# Include non-streaming routes.
app.include_router(whisper.router_whisper)
app.include_router(ollama.router_ollama)
app.include_router(chattts.router_chattts)
app.include_router(models.router)

# Include streaming routes.
app.include_router(ollama_stream.router_ollama)
app.include_router(whisper_stream.router_whisper)
app.include_router(chattts_stream.router_chattts)
app.include_router(stream_router, prefix="/stream")


if __name__ == "__main__":
    # Launch the ASGI server with host/port/log settings taken from APP_CONFIG.
    server_options = {
        "host": APP_CONFIG.host,
        "port": APP_CONFIG.port,
        "log_level": APP_CONFIG.log_level,
        # "timeout_keep_alive": APP_CONFIG.timeout_keep_alive,
    }
    uvicorn.run(app, **server_options)

