"""
模型管理器
实现高级模型加载、预加载、智能切换和内存管理
"""

import asyncio
import time
import json
import os
from typing import Dict, List, Optional, Tuple, Any
from enum import Enum
from dataclasses import dataclass
from datetime import datetime
import threading

from loguru import logger
from app.core.resource_monitor import (
    get_resource_monitor, ResourceMonitor,
    get_model_resource_info, MODEL_SWITCH_THRESHOLD
)
# 注意：延迟导入以避免循环依赖，在需要时在函数内部导入
# 延迟导入以避免循环依赖
from config.model_config import (
    model_config_manager, ModelConfig, ModelBackend, ModelSize
)
# 本地定义ModelState枚举以避免循环导入
class ModelState(Enum):
    """Lifecycle state of an individual model (defined locally to avoid a circular import)."""
    READY = "ready"
    LOADING = "loading"
    ERROR = "error"
    STANDBY = "standby"
    TOO_LARGE = "too_large"  # model too large to load under current resources
from utils.exceptions import MaessBaseException as ModelError  # 使用通用异常类作为替代


class ModelManagerState(Enum):
    """Lifecycle state of the AdvancedModelManager itself."""
    INITIALIZING = "initializing"
    READY = "ready"
    PAUSED = "paused"
    ERROR = "error"


@dataclass
class ModelLoadInfo:
    """Bookkeeping record for a model currently tracked as loaded."""
    model_name: str  # model identifier, e.g. "llama3"
    backend: ModelBackend  # backend that serves this model
    memory_mb: float  # estimated memory footprint in MB
    load_time: float  # epoch timestamp when the model was loaded
    last_used: float  # epoch timestamp of the most recent use
    request_count: int = 0  # number of requests served by this model
    avg_response_time: float = 0.0  # running mean response time in seconds
    state: ModelState = ModelState.READY  # current model state


class ModelPreloader:
    """Model preloader.

    Preloads models in the background based on usage patterns and
    available system resources. Requests are queued via
    :meth:`schedule_preload` and consumed by a single worker task.
    """

    def __init__(self, resource_monitor: ResourceMonitor, config_dir: str = None):
        """
        Args:
            resource_monitor: Monitor used to query memory headroom.
            config_dir: Optional config directory; defaults to ``../config``
                relative to this file.
        """
        self._resource_monitor = resource_monitor
        self._config_dir = config_dir or os.path.join(os.path.dirname(__file__), "../config")
        # Bounded queue of (model_name, backend) preload requests.
        self._preload_queue: asyncio.Queue = asyncio.Queue(maxsize=10)
        self._active = False
        self._preload_lock = asyncio.Lock()
        # model_name -> record of the last preload attempt (success or failure).
        self._preload_history: Dict[str, Dict] = {}
        self._preload_thread: Optional[threading.Thread] = None
        # Strong reference to the worker task: without it the task could be
        # garbage-collected mid-flight, and shutdown() could not cancel it.
        self._worker_task: Optional[asyncio.Task] = None

    async def initialize(self):
        """Start the background preload worker."""
        self._active = True
        # Keep the task handle (see __init__ note) so shutdown() can cancel it.
        self._worker_task = asyncio.create_task(self._preload_worker())
        logger.info("模型预加载器初始化完成")

    async def _preload_worker(self):
        """Worker coroutine: drains the preload queue while active."""
        while self._active:
            try:
                # Block until a preload request arrives.
                model_name, backend = await self._preload_queue.get()
            except asyncio.CancelledError:
                break

            delay = 2  # throttle between preloads
            try:
                # Only load when memory headroom allows it.
                if await self._can_load_model(model_name):
                    logger.info(f"开始预加载模型: {model_name} ({backend.value})")
                    await self._load_model(model_name, backend)
                else:
                    logger.warning(f"资源不足，跳过预加载模型: {model_name}")
            except asyncio.CancelledError:
                break  # finally below still balances the get() with task_done()
            except Exception as e:
                logger.error(f"预加载工作线程错误: {str(e)}")
                delay = 5  # back off longer after an error
            finally:
                # Exactly one task_done() per successful get(), on every path
                # (the original could call it twice after an error).
                self._preload_queue.task_done()

            await asyncio.sleep(delay)

    async def _can_load_model(self, model_name: str) -> bool:
        """Return True when the given model can be preloaded safely.

        Combines the model's declared footprint with current memory
        pressure; unknown models are never preloaded.
        """
        # Look up the model's declared resource footprint.
        resource_info = get_model_resource_info(model_name)
        if not resource_info:
            logger.warning(f"无法获取模型资源信息: {model_name}")
            return False

        # Current memory pressure.
        available_memory = await self._resource_monitor.get_available_memory_mb()
        memory_usage = await self._resource_monitor.get_memory_usage()

        # Skip preloading when usage exceeds 80% or less than 2 GB is free.
        if memory_usage > 80 or available_memory < 2000:
            return False

        # Require headroom beyond the model's footprint.
        required_memory = resource_info.get("memory_mb", 1024)
        safety_margin = 500  # 500 MB safety margin

        return (available_memory - required_memory) > safety_margin

    async def _load_model(self, model_name: str, backend: ModelBackend):
        """Preload the given model and record the outcome in history.

        Never raises: failures are logged and recorded in
        ``_preload_history`` instead.
        """
        try:
            start_time = time.time()

            # Create and initialize a client for the requested backend.
            if backend == ModelBackend.OLLAMA:
                from middleware.model_client import OllamaClient
                config = model_config_manager.get_config(f"ollama-{model_name.split(':')[0]}")
                if not config:
                    # No registered config for this model: synthesize and register one.
                    from config.model_config import OllamaConfig
                    config = OllamaConfig()
                    config.model_name = model_name
                    config.backend_config["model"] = model_name
                    config.name = f"ollama-{model_name.split(':')[0]}"
                    model_config_manager.register_config(config)

                client = OllamaClient(config=config)
                await client.initialize()

                # A lightweight health check forces the backend to load the model.
                await client.health_check()

                # Record the successful preload.
                end_time = time.time()
                resource_info = get_model_resource_info(model_name)
                self._preload_history[model_name] = {
                    "loaded": True,
                    "time": end_time - start_time,
                    "memory_mb": resource_info.get("memory_mb", 0) if resource_info else 0,
                    "timestamp": end_time
                }

                logger.info(f"模型预加载完成: {model_name}, 耗时: {end_time - start_time:.2f}秒")

        except Exception as e:
            logger.error(f"模型预加载失败: {model_name}, 错误: {str(e)}")
            self._preload_history[model_name] = {
                "loaded": False,
                "error": str(e),
                "timestamp": time.time()
            }

    async def schedule_preload(self, model_name: str, backend: ModelBackend, priority: int = 0):
        """Enqueue a preload request.

        Args:
            model_name: Model name.
            backend: Model backend.
            priority: Priority 0-9, 9 highest. Accepted for API
                compatibility; the FIFO queue does not currently honor it.
        """
        if not self._active:
            return

        try:
            # put_nowait: `await put()` would block forever on a full queue
            # instead of raising QueueFull, making the handler below dead code.
            self._preload_queue.put_nowait((model_name, backend))
            logger.debug(f"已安排预加载模型: {model_name} (优先级: {priority})")
        except asyncio.QueueFull:
            logger.warning(f"预加载队列已满，跳过预加载: {model_name}")

    async def preload_by_usage_prediction(self):
        """Preload models predicted to be used soon."""
        # Analyze historical/temporal usage patterns.
        usage_pattern = await self._analyze_usage_pattern()

        # Only schedule models with a high predicted-use probability.
        for model_name, backend, score in usage_pattern:
            if score > 0.5:
                await self.schedule_preload(model_name, backend, priority=int(score * 10))

    async def _analyze_usage_pattern(self) -> List[Tuple[str, ModelBackend, float]]:
        """Predict which models are likely to be used.

        Returns:
            List of (model name, backend, prediction score) tuples.
        """
        # Placeholder for a real prediction algorithm: currently a simple
        # time-of-day heuristic.
        current_hour = datetime.now().hour

        if 9 <= current_hour <= 18:
            # Working hours: favor the more capable models.
            return [
                ("llama3", ModelBackend.OLLAMA, 0.8),
                ("qwen2:7b-instruct-q4_K_M", ModelBackend.OLLAMA, 0.6)
            ]
        else:
            # Off hours: favor lighter-weight models.
            return [
                ("phi3:3.8b-instruct-q4_K_M", ModelBackend.OLLAMA, 0.7),
                ("llama3", ModelBackend.OLLAMA, 0.5)
            ]

    async def shutdown(self):
        """Stop the worker task and drain any pending preload requests."""
        self._active = False

        # Cancel the worker so it does not linger on a blocked get().
        if self._worker_task is not None:
            self._worker_task.cancel()
            try:
                await self._worker_task
            except asyncio.CancelledError:
                pass
            self._worker_task = None

        # Drain the queue, balancing each pending item with task_done().
        while not self._preload_queue.empty():
            try:
                self._preload_queue.get_nowait()
                self._preload_queue.task_done()
            except (asyncio.QueueEmpty, ValueError):
                break

        logger.info("模型预加载器已关闭")


class AdvancedModelManager:
    """Advanced model manager.

    Implements intelligent model management, optimized switching
    strategies and memory management. Acts as a process-wide singleton
    via ``__new__``.
    """
    
    _instance = None
    # NOTE(review): this class-level lock is never used in the visible code;
    # instance-level _switch_lock guards model switching instead.
    _lock = asyncio.Lock()
    
    def __new__(cls):
        # Classic singleton: every construction returns the same instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance
    
    def __init__(self):
        # Guard against re-running __init__ on the shared singleton instance.
        if not hasattr(self, '_initialized'):
            self._resource_monitor = get_resource_monitor()
            # NOTE(review): get_model_adapter is not imported anywhere in this
            # module (header comments mention delayed imports to avoid circular
            # dependencies) — as written this raises NameError unless the name
            # is provided elsewhere; confirm the intended import.
            self._model_adapter = get_model_adapter()
            self._preloader = ModelPreloader(self._resource_monitor)
            
            # Model state bookkeeping.
            self._loaded_models: Dict[str, ModelLoadInfo] = {}
            self._active_model: Optional[str] = None
            self._state = ModelManagerState.INITIALIZING
            self._switch_lock = asyncio.Lock()
            self._health_check_interval = 60  # seconds
            
            # Tuning parameters.
            self._model_warmup_enabled = True
            self._aggressive_unload = False  # aggressive unload mode
            self._memory_threshold_adjustment = 0.95  # memory threshold scaling factor
            
            # Initialization flag (set True only after initialize() succeeds).
            self._initialized = False
            
            # Handle for the periodic health-check task.
            self._health_check_task = None
            
    async def initialize(self):
        """Initialize the manager: preloader, health-check loop and the
        periodic predictive-preload loop. Idempotent once successful."""
        if self._initialized:
            return
        
        try:
            logger.info("正在初始化高级模型管理器...")
            
            # Initialize the preloader.
            await self._preloader.initialize()
            
            # Start the periodic health-check task.
            self._health_check_task = asyncio.create_task(self._scheduled_health_check())
            
            # Record info about the currently active model.
            await self._load_current_model_info()
            
            # Start usage-pattern-based predictive preloading.
            # NOTE(review): this task handle is not retained, so shutdown()
            # cannot cancel it and the task may even be garbage-collected.
            asyncio.create_task(self._periodic_preload())
            
            self._state = ModelManagerState.READY
            self._initialized = True
            logger.info("高级模型管理器初始化完成")
            
        except Exception as e:
            logger.error(f"初始化模型管理器失败: {str(e)}")
            self._state = ModelManagerState.ERROR
            raise
    
    async def _load_current_model_info(self):
        """Best-effort: record load info for the adapter's current model."""
        try:
            # Peek at the adapter's private field; relies on ModelAdapter
            # internals rather than a public API.
            current_model = getattr(self._model_adapter, '_current_model', None)
            if current_model:
                self._active_model = current_model
                # Look up the model's resource footprint.
                resource_info = get_model_resource_info(current_model)
                if resource_info:
                    self._loaded_models[current_model] = ModelLoadInfo(
                        model_name=current_model,
                        backend=ModelBackend.OLLAMA,  # assumes current backend is Ollama — confirm
                        memory_mb=resource_info.get("memory_mb", 0),
                        load_time=time.time(),
                        last_used=time.time()
                    )
        except Exception as e:
            logger.error(f"加载当前模型信息失败: {str(e)}")
    
    async def _scheduled_health_check(self):
        """Periodic health-check loop; exits once the manager leaves READY."""
        while self._state == ModelManagerState.READY:
            try:
                await self._check_models_health()
                await self._optimize_model_memory()
                await asyncio.sleep(self._health_check_interval)
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"健康检查任务错误: {str(e)}")
                await asyncio.sleep(60)  # back off longer before retrying after an error
    
    async def _check_models_health(self):
        """Unload models that have gone unused for too long."""
        current_time = time.time()
        
        # Iterate over a snapshot: _unload_model mutates _loaded_models.
        for model_name, load_info in list(self._loaded_models.items()):
            try:
                # Consider unloading models idle for more than an hour.
                if (current_time - load_info.last_used) > 3600:  # unused for over 1 hour
                    logger.info(f"模型 {model_name} 长时间未使用，考虑卸载")
                    if self._aggressive_unload or len(self._loaded_models) > 1:
                        await self._unload_model(model_name)
            except Exception as e:
                logger.error(f"检查模型 {model_name} 健康状态失败: {str(e)}")
    
    async def _optimize_model_memory(self):
        """Unload the least-recently-used inactive model under memory pressure."""
        # Current memory pressure.
        memory_usage = await self._resource_monitor.get_memory_usage()
        # NOTE(review): available_memory is fetched but never used below.
        available_memory = await self._resource_monitor.get_available_memory_mb()
        
        # When usage exceeds the (adjusted) switch threshold, shed inactive models.
        if memory_usage > MODEL_SWITCH_THRESHOLD * self._memory_threshold_adjustment:
            logger.warning(f"内存使用率过高 ({memory_usage}%)，尝试优化模型内存")
            
            # Sort inactive models by last-used time (oldest first).
            models_to_unload = sorted(
                [m for m in self._loaded_models.items() if m[0] != self._active_model],
                key=lambda x: x[1].last_used
            )
            
            # Unload the least recently used one.
            if models_to_unload:
                await self._unload_model(models_to_unload[0][0])
    
    async def _unload_model(self, model_name: str):
        """Drop the given model from the tracked set.

        NOTE(review): no actual backend unload happens yet — this only
        removes the bookkeeping entry.
        """
        if model_name in self._loaded_models:
            logger.info(f"开始卸载模型: {model_name}")
            del self._loaded_models[model_name]
            logger.info(f"模型 {model_name} 已卸载")
    
    async def _periodic_preload(self):
        """Run predictive preloading on a fixed cadence while READY."""
        while self._state == ModelManagerState.READY:
            try:
                # Analyze and preload every 30 minutes.
                await self._preloader.preload_by_usage_prediction()
                await asyncio.sleep(1800)
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"定期预加载任务错误: {str(e)}")
                await asyncio.sleep(300)  # wait 5 minutes before retrying after an error
    
    async def get_optimal_model(self, task_complexity: str = "medium", **kwargs) -> Tuple[str, ModelBackend]:
        """Pick the best model for a task given current system resources.
        
        Args:
            task_complexity: Task complexity ("low"/"medium"/"high").
            **kwargs: Reserved; currently unused.
            
        Returns:
            (model name, backend type)
        """
        # Ask the resource monitor which size class the system can afford.
        recommended_size = await self._resource_monitor.get_recommended_model_size()
        
        # Static size-class x complexity lookup table.
        model_map = {
            "small": {
                "low": ("phi3:3.8b-instruct-q4_K_M", ModelBackend.OLLAMA),
                "medium": ("phi3:3.8b-instruct-q4_K_M", ModelBackend.OLLAMA),
                "high": ("phi3:3.8b-instruct-q4_K_M", ModelBackend.OLLAMA)
            },
            "medium": {
                "low": ("phi3:3.8b-instruct-q4_K_M", ModelBackend.OLLAMA),
                "medium": ("qwen2:7b-instruct-q4_K_M", ModelBackend.OLLAMA),
                "high": ("qwen2:7b-instruct-q4_K_M", ModelBackend.OLLAMA)
            },
            "large": {
                "low": ("phi3:3.8b-instruct-q4_K_M", ModelBackend.OLLAMA),
                "medium": ("qwen2:7b-instruct-q4_K_M", ModelBackend.OLLAMA),
                "high": ("llama3", ModelBackend.OLLAMA)
            }
        }
        
        # Fall back to the lightest model for unknown sizes/complexities.
        model_info = model_map.get(recommended_size, {}).get(task_complexity, 
                                                           ("phi3:3.8b-instruct-q4_K_M", ModelBackend.OLLAMA))
        
        return model_info
    
    async def switch_model(self, model_name: str, backend: ModelBackend) -> bool:
        """Switch the active model, freeing memory first if needed.
        
        Args:
            model_name: Model name.
            backend: Model backend.
            
        Returns:
            bool: True on success (or if already active), False otherwise.
        """
        async with self._switch_lock:
            # No-op when the requested model is already active.
            if self._active_model == model_name:
                logger.debug(f"已经是当前模型: {model_name}")
                return True
            
            logger.info(f"准备切换到模型: {model_name} ({backend.value})")
            
            # Verify sufficient resources before attempting the switch.
            resource_info = get_model_resource_info(model_name)
            if resource_info:
                can_load = await self._resource_monitor.can_load_model(
                    model_name, resource_info.get("memory_mb", 0)
                )
                if not can_load:
                    logger.warning(f"资源不足，无法切换到模型: {model_name}")
                    # Try freeing memory by unloading an inactive model.
                    await self._optimize_model_memory()
                    # Re-check after the optimization attempt.
                    can_load = await self._resource_monitor.can_load_model(
                        model_name, resource_info.get("memory_mb", 0)
                    )
                    if not can_load:
                        return False
            
            try:
                # Delegate the actual switch to the model adapter
                # (private API — relies on ModelAdapter internals).
                await self._model_adapter._switch_to_model(backend, model_name)
                
                # Update bookkeeping for the newly active model.
                self._active_model = model_name
                if model_name not in self._loaded_models:
                    self._loaded_models[model_name] = ModelLoadInfo(
                        model_name=model_name,
                        backend=backend,
                        memory_mb=resource_info.get("memory_mb", 0) if resource_info else 0,
                        load_time=time.time(),
                        last_used=time.time()
                    )
                else:
                    self._loaded_models[model_name].last_used = time.time()
                    self._loaded_models[model_name].request_count += 1
                
                logger.info(f"成功切换到模型: {model_name}")
                return True
                
            except Exception as e:
                logger.error(f"切换模型失败: {model_name}, 错误: {str(e)}")
                return False
    
    async def register_model_usage(self, model_name: str, response_time: float):
        """Record one completed request against a tracked model.
        
        Args:
            model_name: Model name.
            response_time: Response time in seconds.
        """
        if model_name in self._loaded_models:
            load_info = self._loaded_models[model_name]
            load_info.last_used = time.time()
            load_info.request_count += 1
            
            # Incremental running mean of response time.
            # NOTE(review): switch_model() also increments request_count
            # without supplying a response time, which skews this average.
            total_response_time = load_info.avg_response_time * (load_info.request_count - 1)
            load_info.avg_response_time = (total_response_time + response_time) / load_info.request_count
    
    async def get_model_status(self) -> Dict[str, Any]:
        """Snapshot the model system status.
        
        Returns:
            Dict: manager state, per-model stats, resource info and
            preload queue depth.
        """
        # Resource status.
        memory_usage = await self._resource_monitor.get_memory_usage()
        available_memory = await self._resource_monitor.get_available_memory_mb()
        recommended_size = await self._resource_monitor.get_recommended_model_size()
        
        # Assemble the status payload.
        status = {
            "manager_state": self._state.value,
            "active_model": self._active_model,
            "loaded_models_count": len(self._loaded_models),
            "loaded_models": [
                {
                    "name": info.model_name,
                    "backend": info.backend.value,
                    "memory_mb": info.memory_mb,
                    "state": info.state.value,
                    "request_count": info.request_count,
                    "avg_response_time": info.avg_response_time,
                    "last_used": datetime.fromtimestamp(info.last_used).isoformat()
                }
                for info in self._loaded_models.values()
            ],
            "resource_info": {
                "memory_usage_percent": memory_usage,
                "available_memory_mb": available_memory,
                "recommended_model_size": recommended_size
            },
            "preload_queue_size": self._preloader._preload_queue.qsize() if hasattr(self._preloader, '_preload_queue') else 0
        }
        
        return status
    
    async def set_optimization_parameters(self, **kwargs):
        """Update tuning parameters; unknown keys are silently ignored.
        
        Args:
            **kwargs: Optimization parameters:
                - aggressive_unload: enable aggressive unload mode
                - memory_threshold_adjustment: memory threshold scaling factor
                - health_check_interval: health-check interval in seconds
                - model_warmup_enabled: enable model warmup
        """
        if "aggressive_unload" in kwargs:
            self._aggressive_unload = kwargs["aggressive_unload"]
        if "memory_threshold_adjustment" in kwargs:
            self._memory_threshold_adjustment = kwargs["memory_threshold_adjustment"]
        if "health_check_interval" in kwargs:
            self._health_check_interval = kwargs["health_check_interval"]
        if "model_warmup_enabled" in kwargs:
            self._model_warmup_enabled = kwargs["model_warmup_enabled"]
        
        logger.info(f"模型管理器优化参数已更新: {kwargs}")
    
    async def pause(self):
        """Pause the manager (health-check/preload loops will exit READY checks)."""
        if self._state == ModelManagerState.READY:
            self._state = ModelManagerState.PAUSED
            logger.info("模型管理器已暂停")
    
    async def resume(self):
        """Resume the manager from a paused state."""
        if self._state == ModelManagerState.PAUSED:
            self._state = ModelManagerState.READY
            logger.info("模型管理器已恢复")
    
    async def shutdown(self):
        """Shut down the manager and release tracked state."""
        # Stop the health-check task.
        # NOTE(review): the _periodic_preload task started in initialize()
        # is never cancelled here (its handle was not kept).
        if self._health_check_task:
            self._health_check_task.cancel()
            try:
                await self._health_check_task
            except asyncio.CancelledError:
                pass
        
        # Shut down the preloader.
        await self._preloader.shutdown()
        
        # Clear model bookkeeping.
        self._loaded_models.clear()
        self._active_model = None
        
        self._state = ModelManagerState.INITIALIZING
        self._initialized = False
        logger.info("高级模型管理器已关闭")


# Global model manager instance (lazily created by get_advanced_model_manager).
_advanced_model_manager = None


async def get_advanced_model_manager() -> AdvancedModelManager:
    """Return the global advanced model manager, creating and
    initializing it on first use.

    Returns:
        AdvancedModelManager: the initialized manager instance.

    Raises:
        Exception: propagated from initialize(). On failure the global
            stays unset so the next call retries initialization instead
            of returning a broken, never-initialized instance (the
            original assigned the global before initialize() ran).
    """
    global _advanced_model_manager

    if _advanced_model_manager is None:
        manager = AdvancedModelManager()
        await manager.initialize()
        # Publish only after successful initialization.
        _advanced_model_manager = manager

    return _advanced_model_manager


async def init_advanced_model_manager():
    """Eagerly create and initialize the global advanced model manager."""
    # The accessor performs creation + initialization; the instance itself
    # is not needed here.
    await get_advanced_model_manager()
    logger.info("高级模型管理器已初始化")


async def close_advanced_model_manager():
    """Shut down and discard the global advanced model manager, if any."""
    global _advanced_model_manager

    manager = _advanced_model_manager
    if manager:
        await manager.shutdown()
        _advanced_model_manager = None

    logger.info("高级模型管理器已关闭")


async def optimize_model_selection(task_complexity: str = "medium", **kwargs) -> Tuple[str, ModelBackend]:
    """Select the best model for a task via the global manager.

    Args:
        task_complexity: Task complexity ("low"/"medium"/"high").
        **kwargs: Forwarded to the manager's selection logic.

    Returns:
        (model name, backend type)
    """
    mgr = await get_advanced_model_manager()
    return await mgr.get_optimal_model(task_complexity, **kwargs)


async def switch_to_optimized_model(task_complexity: str = "medium", **kwargs) -> bool:
    """Pick the best model for the task and switch the global manager to it.

    Args:
        task_complexity: Task complexity ("low"/"medium"/"high").
        **kwargs: Forwarded to the manager's selection logic.

    Returns:
        bool: True when the switch succeeded.
    """
    mgr = await get_advanced_model_manager()
    selection = await mgr.get_optimal_model(task_complexity, **kwargs)
    # selection is a (model_name, backend) pair.
    return await mgr.switch_model(*selection)


async def register_model_usage_metrics(model_name: str, response_time: float):
    """Best-effort: record one request's metrics on the global manager.

    Args:
        model_name: Model name.
        response_time: Response time in seconds.
    """
    try:
        mgr = await get_advanced_model_manager()
        await mgr.register_model_usage(model_name, response_time)
    except Exception as e:
        # Metrics are non-critical: log and continue.
        logger.warning(f"注册模型使用指标失败: {str(e)}")