import threading
import time
import torch
import logging
import asyncio
from typing import Dict, Any
from fastapi import HTTPException
import os
import shutil
from ollama import Client
from utils.logger import setup_logger
logger = setup_logger()


class ModelManager:
    """Thread-safe, on-demand lifecycle manager for the inference models.

    Tracks four models (whisperx, ollama, chattts, whisper).  Each has an
    entry in ``self.status`` with a ``loaded`` flag, a per-model
    ``threading.Lock``, an in-flight ``usage_count`` and a ``last_used``
    timestamp.  Loaded model handles are stored on the caller-supplied
    ``app_state`` object, not on this manager.
    """

    def __init__(self):
        # Currently unused bookkeeping dict, kept for interface compatibility.
        self.models: Dict[str, Any] = {}
        # Per-model state; the Lock guards every field of its own entry.
        # NOTE: threading.Lock is NOT reentrant — never call another method
        # that takes the same lock while holding it.
        self.status: Dict[str, Dict[str, Any]] = {
            name: {"loaded": False, "lock": threading.Lock(),
                   "usage_count": 0, "last_used": 0}
            for name in ("whisperx", "ollama", "chattts", "whisper")
        }
        self.timeout = 300  # idle timeout in seconds (5 minutes)
        # Resolved lazily in start_cleanup_task(): calling
        # asyncio.get_event_loop() in __init__ is deprecated (3.10+) when no
        # loop is running and may bind a loop that is never used.
        self.loop = None

    async def _load_whisperx(self, config, app_state):
        """Load and return the WhisperX model described by ``config``.

        Raises HTTPException(500) when the model cache directory is missing.
        """
        import whisperx
        if not os.path.exists(config.whisperx_model_dir):
            logger.error(f"WhisperX cache directory {config.whisperx_model_dir} does not exist")
            raise HTTPException(500, f"WhisperX cache directory {config.whisperx_model_dir} not found")
        # whisperx.load_model is a blocking synchronous call — the original
        # code awaited it directly, which raises TypeError on a non-awaitable.
        # Run it in a worker thread so the event loop stays responsive.
        loop = asyncio.get_running_loop()
        model = await loop.run_in_executor(
            None,
            lambda: whisperx.load_model(
                config.whisperx_model,
                device=config.whisperx_device,
                download_root=config.whisperx_model_dir,
            ),
        )
        return model

    async def _load_ollama(self, config, app_state):
        """Create an Ollama AsyncClient on ``app_state`` and ensure the chat
        model is available, pulling it when ``show`` fails or times out.
        """
        from ollama import AsyncClient

        app_state.ollama_client = AsyncClient(host=config.ollama_host)
        model_name_to_load = config.ollama_chat_model
        try:
            try:
                # Bounded existence check: show() can hang if the Ollama
                # server is unreachable.
                await asyncio.wait_for(
                    app_state.ollama_client.show(model_name_to_load),
                    timeout=10.0
                )
            except asyncio.TimeoutError:
                logger.debug("检查模型超时，尝试拉取...")
                raise  # fall through to the pull path below

        except Exception:
            # Covers both "model not found" and the show() timeout above.
            logger.debug(f"Model {model_name_to_load} 未找到，正在拉取...")
            try:
                # Pulling a model can take a long time; cap it at 300 s.
                await asyncio.wait_for(
                    app_state.ollama_client.pull(model_name_to_load),
                    timeout=300.0
                )
                # Verify the pull actually made the model visible.
                await app_state.ollama_client.show(model_name_to_load)
                logger.debug(f"Ollama 模型 {model_name_to_load} 已就绪")
            except asyncio.TimeoutError:
                logger.debug("拉取模型超时，请检查网络或模型名称")
                raise

    async def _load_chattts(self, config, app_state):
        """Load ChatTTS from a local checkpoint onto ``app_state.chattts_model``."""
        import ChatTTS
        app_state.chattts_model = ChatTTS.Chat()
        # NOTE(review): device is hard-coded to 'cuda' here, unlike the
        # whisper loaders which read it from config — confirm intentional.
        # Chat.load is blocking; keep it off the event loop thread.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(
            None,
            lambda: app_state.chattts_model.load(
                source="local", device='cuda', custom_path=config.chattts_model_dir
            ),
        )

    async def _load_whisper(self, config, app_state):
        """Load the Whisper model onto ``app_state.whisper_model`` and return it.

        Raises HTTPException(500) when the model cache directory is missing.
        """
        import whisper
        if not os.path.exists(config.whisper_model_dir):
            logger.error(f"whisper cache directory {config.whisper_model_dir} does not exist")
            raise HTTPException(500, f"whisper cache directory {config.whisper_model_dir} not found")
        # whisper.load_model is blocking; run it in a worker thread so the
        # event loop stays responsive during a potentially long load.
        loop = asyncio.get_running_loop()
        app_state.whisper_model = await loop.run_in_executor(
            None,
            lambda: whisper.load_model(
                config.whisper_model,
                device=config.whisper_device,
                download_root=config.whisper_model_dir,
            ),
        )
        return app_state.whisper_model

    async def load_model(self, model_name: str, app_state: Any, config: Any):
        """Load a model on demand with thread safety; no-op when already loaded.

        ``config`` may be a config object or a dict of the form
        ``{"config": {...}}`` (some callers pass a plain dict).

        Raises HTTPException(500) when loading fails.
        """
        from types import SimpleNamespace
        # NOTE(review): this threading.Lock is held across awaits; while a
        # load is in progress any other coroutine contending for this lock
        # blocks the whole event-loop thread.  Kept as-is pending a redesign
        # around asyncio.Lock — confirm call sites before changing.
        with self.status[model_name]["lock"]:
            if not self.status[model_name]["loaded"]:
                logger.info(f"Loading {model_name} model...")
                # Normalise dict-style configs to attribute access.
                if isinstance(config, dict):
                    config = SimpleNamespace(**config.get("config", {}))

                logger.debug(f"Loading {model_name} model with config: {config}  config_type{type(config)}")
                try:
                    if model_name == "whisperx":
                        app_state.whisperx_model = await self._load_whisperx(config, app_state)
                    elif model_name == "ollama":
                        await self._load_ollama(config, app_state)
                    elif model_name == "chattts":
                        await self._load_chattts(config, app_state)
                    elif model_name == "whisper":
                        app_state.whisper_model = await self._load_whisper(config, app_state)
                    else:
                        # Unreachable for the keys in self.status, but fail
                        # loudly instead of marking an unknown model loaded.
                        raise ValueError(f"Unknown model 不支持的模型: {model_name}")
                    self.status[model_name]["loaded"] = True
                    self.status[model_name]["last_used"] = time.time()
                    logger.info(f"{model_name} model loaded")
                except HTTPException:
                    # Loaders already raise a meaningful HTTPException;
                    # don't double-wrap it and mangle the detail message.
                    raise
                except Exception as e:
                    logger.error(f"Failed to load {model_name}: {str(e)}")
                    raise HTTPException(500, f"Failed to load {model_name}: {str(e)}")

    def unload_model(self, model_name: str, app_state: Any, timeout: int = 300):
        """Unload a model once no request is using it, waiting up to ``timeout``.

        Drops the model handle from ``app_state`` and frees CUDA memory.
        Returns silently on timeout or when the model is not loaded.
        """
        status = self.status[model_name]
        # Wait for in-flight users OUTSIDE the lock: decrement_usage() needs
        # the same lock, so the original busy-wait (which held the lock) could
        # never observe the count dropping and always burned the full timeout.
        start_time = time.time()
        while True:
            with status["lock"]:
                if not status["loaded"]:
                    return  # nothing to do (never loaded, or unloaded elsewhere)
                if status["usage_count"] == 0:
                    break
            if time.time() - start_time > timeout:
                logger.debug(f"Timeout waiting for {model_name} model to be free")
                return
            time.sleep(1)
        with status["lock"]:
            # Re-check: a request may have grabbed the model between loops.
            if not status["loaded"] or status["usage_count"] > 0:
                return
            logger.info(f"Unloading {model_name} model...")
            try:
                if model_name == "whisperx" and hasattr(app_state, "whisperx_model"):
                    del app_state.whisperx_model
                elif model_name == "ollama" and hasattr(app_state, "ollama_client"):
                    del app_state.ollama_client
                elif model_name == "chattts" and hasattr(app_state, "chattts_model"):
                    del app_state.chattts_model
                elif model_name == "whisper" and hasattr(app_state, "whisper_model"):
                    del app_state.whisper_model
                torch.cuda.empty_cache()
                status["loaded"] = False
                status["last_used"] = 0
                logger.info(f"{model_name} model unloaded")
            except Exception as e:
                logger.error(f"Failed to unload {model_name}: {str(e)}")
                raise HTTPException(500, f"Failed to unload {model_name}: {str(e)}")

    def increment_usage(self, model_name: str):
        """Increment usage count for a model and update last used time."""
        with self.status[model_name]["lock"]:
            self.status[model_name]["usage_count"] += 1
            self.status[model_name]["last_used"] = time.time()

    def decrement_usage(self, model_name: str):
        """Decrement usage count for a model (floored at 0) and update last used time."""
        with self.status[model_name]["lock"]:
            self.status[model_name]["usage_count"] = max(0, self.status[model_name]["usage_count"] - 1)
            self.status[model_name]["last_used"] = time.time()

    async def cleanup_loop(self, app_state: Any):
        """Periodically unload models idle for longer than ``self.timeout``."""
        while True:
            for model_name in self.status:
                status = self.status[model_name]
                # Decide under the lock, act outside it: unload_model()
                # re-acquires this non-reentrant lock, so calling it while
                # holding the lock (as the original did) deadlocks the task.
                with status["lock"]:
                    idle_expired = (
                        status["loaded"]
                        and status["last_used"] > 0
                        and time.time() - status["last_used"] > self.timeout
                    )
                if idle_expired:
                    self.unload_model(model_name, app_state)
            await asyncio.sleep(60)  # check once per minute

    def start_cleanup_task(self, app_state: Any):
        """Start the cleanup task on the current asyncio event loop.

        Must be called from a running loop (or a thread with a loop set);
        the loop is resolved here rather than in __init__ so construction
        does not depend on an event loop existing yet.
        """
        if self.loop is None:
            try:
                self.loop = asyncio.get_running_loop()
            except RuntimeError:
                # No loop running in this thread; fall back to the legacy
                # accessor for pre-3.10-style callers.
                self.loop = asyncio.get_event_loop()
        self.loop.create_task(self.cleanup_loop(app_state))

    def get_status(self) -> Dict[str, bool]:
        """Return a mapping of model name -> loaded flag for all models."""
        return {name: status["loaded"] for name, status in self.status.items()}