"""
资源监控模块
实时监控系统内存、CPU等资源使用情况
为模型动态切换提供决策依据
"""

import psutil
import time
from typing import Dict, Optional, Any, List, Tuple, Set
from threading import Thread
import asyncio
from utils.logger import Logger
from utils.constants import MODEL_SWITCH_THRESHOLD, MODEL_RESOURCE_THRESHOLDS, RECOMMENDED_MODELS
# Memory-usage percentage at/above which we fall back to the cloud LLM service.
CLOUD_FALLBACK_THRESHOLD = 90

# Module-level logger (project Logger wrapper).
logger = Logger.get_logger("resource_monitor")


class ResourceMonitor:
    """System resource monitor.

    Samples memory/CPU usage on a background daemon thread and exposes async
    accessors that drive dynamic model switching and cloud-fallback decisions.

    Concurrency note: the daemon thread writes the metric fields with plain
    attribute stores, while coroutines read them under ``self._lock``. The
    asyncio.Lock serializes coroutine access only — it does not synchronize
    with the thread; the metrics are simple scalars, so this mirrors the
    original design.

    Bug fix vs. the previous version: ``record_model_switch`` used to await
    ``get_model_switch_reason()`` while already holding ``self._lock``.
    ``asyncio.Lock`` is NOT reentrant, so that call deadlocked. The reason
    text is now built by the lock-free helper ``_describe_switch_reason``.
    """

    def __init__(self):
        self._is_running = False
        self._monitor_thread: Optional[Thread] = None
        # Latest sampled metrics (written by the monitor thread).
        self._memory_usage: float = 0.0
        self._cpu_usage: float = 0.0
        self._available_memory_mb: int = 0
        self._total_memory_mb: int = 0
        self._last_update_time: float = 0.0
        self._update_interval: float = 1.0  # sampling period (seconds); short for responsiveness
        self._model_memory_usage: Dict[str, float] = {}  # per-model memory footprint (MB)
        self._lock = asyncio.Lock()  # serializes coroutine access to monitor state
        self._high_memory_duration: int = 0  # seconds continuously at/above the switch threshold
        self._model_switch_history: List[Dict[str, Any]] = []  # most recent switch records (max 10)
        self._min_memory_available: int = 500  # minimum acceptable available memory (MB)
        self._model_usage_history: Dict[str, List[Tuple[float, float]]] = {}  # (timestamp, MB) samples per model
        self._system_memory_total = psutil.virtual_memory().total // (1024 * 1024)  # total RAM (MB)

    async def start(self):
        """Start the background monitoring thread (idempotent)."""
        if self._is_running:
            return

        self._is_running = True
        self._monitor_thread = Thread(target=self._monitor_loop, daemon=True)
        self._monitor_thread.start()
        logger.info("资源监控已启动")

    async def stop(self):
        """Signal the monitoring thread to stop and wait briefly for it to exit."""
        self._is_running = False
        if self._monitor_thread:
            self._monitor_thread.join(timeout=5.0)
        logger.info("资源监控已停止")

    def _monitor_loop(self):
        """Sampling loop executed on the background daemon thread."""
        # Initialize total-memory info once.
        memory = psutil.virtual_memory()
        self._total_memory_mb = int(memory.total / 1024 / 1024)
        self._system_memory_total = self._total_memory_mb
        logger.info(f"系统总内存: {self._total_memory_mb}MB")

        while self._is_running:
            try:
                # Refresh memory and CPU metrics.
                memory = psutil.virtual_memory()
                cpu = psutil.cpu_percent(interval=0.1)  # short sampling window for responsiveness

                self._memory_usage = memory.percent
                self._cpu_usage = cpu
                self._available_memory_mb = int(memory.available / 1024 / 1024)
                self._last_update_time = time.time()

                # Track how long memory has stayed at/above the switch threshold.
                if self._memory_usage >= MODEL_SWITCH_THRESHOLD:
                    self._high_memory_duration += self._update_interval
                else:
                    self._high_memory_duration = 0

                # Periodic debug log (roughly every 15 seconds).
                if time.time() % 15 < 1:
                    logger.debug(f"系统资源: 内存使用率={self._memory_usage}%, CPU使用率={self._cpu_usage}%, "
                              f"可用内存={self._available_memory_mb}MB, 总内存={self._total_memory_mb}MB")

                # High-memory warning (roughly every 10 seconds while above threshold).
                if self._memory_usage >= MODEL_SWITCH_THRESHOLD and time.time() % 10 < 1:
                    logger.warning(f"高内存状态: 使用率={self._memory_usage}%, 可用={self._available_memory_mb}MB, "
                                f"持续时间={self._high_memory_duration:.1f}秒")

                # Critical memory pressure (near OOM).
                if self._memory_usage >= 95:
                    logger.error(f"危急内存状态: 使用率={self._memory_usage}%, 可用={self._available_memory_mb}MB, "
                              f"请立即释放内存或切换到云端服务")

            except Exception as e:
                logger.error(f"资源监控出错: {str(e)}")

            # Wait for the next sampling tick.
            time.sleep(self._update_interval)

    async def get_memory_usage(self) -> float:
        """Return the current memory usage percentage (refreshed on demand)."""
        async with self._lock:
            # Take a fresh sample so callers get the latest value.
            memory = psutil.virtual_memory()
            self._memory_usage = memory.percent
            self._available_memory_mb = int(memory.available / 1024 / 1024)
            return self._memory_usage

    async def get_cpu_usage(self) -> float:
        """Return the current CPU usage percentage (refreshed on demand)."""
        async with self._lock:
            # Take a fresh sample so callers get the latest value.
            self._cpu_usage = psutil.cpu_percent(interval=0.05)
            return self._cpu_usage

    async def record_model_memory(self, model_name: str, memory_mb: float):
        """Record the memory footprint of a loaded model and append to its history."""
        async with self._lock:
            self._model_memory_usage[model_name] = memory_mb

            # Update the per-model usage history.
            if model_name not in self._model_usage_history:
                self._model_usage_history[model_name] = []

            self._model_usage_history[model_name].append((time.time(), memory_mb))

            # Keep only the most recent 50 samples per model.
            if len(self._model_usage_history[model_name]) > 50:
                self._model_usage_history[model_name] = self._model_usage_history[model_name][-50:]

            logger.info(f"记录模型内存占用: {model_name} = {memory_mb:.2f}MB")

    def _describe_switch_reason(self, memory_usage: float, available_memory: int,
                                high_memory_duration: float) -> str:
        """Build a human-readable switch reason from a metric snapshot.

        Lock-free on purpose: callable both with and without ``self._lock``
        held (asyncio.Lock is not reentrant).
        """
        if memory_usage >= MODEL_SWITCH_THRESHOLD:
            return f"内存使用率过高 ({memory_usage:.1f}% >= {MODEL_SWITCH_THRESHOLD}%)"
        elif available_memory < self._min_memory_available:
            return f"可用内存不足 ({available_memory}MB < {self._min_memory_available}MB)"
        elif high_memory_duration >= 5:
            return f"高内存状态持续时间过长 ({high_memory_duration:.1f}秒)"
        else:
            return "资源充足，无需切换"

    async def record_model_switch(self, from_model: Optional[str], to_model: str):
        """Record a model switch event with a snapshot of the current metrics.

        Fix: the reason is computed via the lock-free helper instead of
        awaiting ``get_model_switch_reason()`` while holding the lock,
        which deadlocked (asyncio.Lock is not reentrant).
        """
        async with self._lock:
            reason = self._describe_switch_reason(
                self._memory_usage, self._available_memory_mb, self._high_memory_duration
            )
            switch_info = {
                "timestamp": time.time(),
                "from_model": from_model,
                "to_model": to_model,
                "memory_usage": self._memory_usage,
                "available_memory_mb": self._available_memory_mb,
                "reason": reason
            }
            self._model_switch_history.append(switch_info)

            # Keep only the 10 most recent records.
            if len(self._model_switch_history) > 10:
                self._model_switch_history.pop(0)

            logger.info(f"模型切换记录: {from_model} -> {to_model}, 内存使用率: {switch_info['memory_usage']}%")

    async def get_model_memory_usage(self, model_name: str) -> Optional[float]:
        """Return the recorded memory footprint (MB) of a model, or None if unknown."""
        async with self._lock:
            return self._model_memory_usage.get(model_name)

    async def get_recommended_model_size(self) -> str:
        """Recommend a model size class based on current resource state.

        Returns: "small", "medium", or "large".
        """
        async with self._lock:
            memory_usage = self._memory_usage
            available_memory = self._available_memory_mb

        # Absolute available memory takes priority.
        if available_memory < 2000:  # less than 2GB free
            logger.debug(f"推荐小模型: 可用内存不足 ({available_memory}MB < 2000MB)")
            return "small"
        elif available_memory < 4000:  # less than 4GB free
            logger.debug(f"推荐中模型: 可用内存适中 ({available_memory}MB < 4000MB)")
            return "medium"

        # Fall back to usage-percentage heuristics.
        if memory_usage >= MODEL_SWITCH_THRESHOLD:
            logger.debug(f"推荐小模型: 内存使用率过高 ({memory_usage}% >= {MODEL_SWITCH_THRESHOLD}%)")
            return "small"
        elif memory_usage >= 75:
            logger.debug(f"推荐中模型: 内存使用率较高 ({memory_usage}% >= 75%)")
            return "medium"
        else:
            logger.debug(f"推荐大模型: 内存充足 ({memory_usage}% < 75%)")
            return "large"

    async def should_switch_model(self) -> bool:
        """Decide whether the active model should be switched to a smaller one."""
        async with self._lock:
            memory_usage = self._memory_usage
            available_memory = self._available_memory_mb
            high_memory_duration = self._high_memory_duration
            total_memory = self._total_memory_mb
            # Snapshot under the lock (the original read this field unlocked).
            total_model_memory = sum(self._model_memory_usage.values())

        # Fast checks: usage above threshold or available memory below minimum.
        if memory_usage >= MODEL_SWITCH_THRESHOLD:
            logger.info(f"触发模型切换: 内存使用率 {memory_usage}% >= {MODEL_SWITCH_THRESHOLD}%")
            return True

        if available_memory < self._min_memory_available:
            logger.info(f"触发模型切换: 可用内存 {available_memory}MB < {self._min_memory_available}MB")
            return True

        # Sustained pressure: high-memory state lasting >= 5s filters out spikes.
        if high_memory_duration >= 5:
            logger.info(f"触发模型切换: 高内存状态持续 {high_memory_duration:.1f}秒")
            return True

        # Total footprint of loaded models exceeding 60% of system memory.
        if total_model_memory > total_memory * 0.6:
            logger.info(f"触发模型切换: 模型总内存占用 {total_model_memory}MB > 系统内存60% ({total_memory * 0.6}MB)")
            return True

        return False

    async def should_use_cloud_service(self) -> bool:
        """Decide whether to fall back to the cloud LLM service."""
        async with self._lock:
            memory_usage = self._memory_usage
            available_memory = self._available_memory_mb
            total_memory = self._total_memory_mb
            # Snapshot under the lock (the original read this field unlocked).
            loaded_models = set(self._model_memory_usage)

        # Usage at/above the cloud-fallback threshold.
        if memory_usage >= CLOUD_FALLBACK_THRESHOLD:
            logger.warning(f"触发云端fallback: 内存使用率 {memory_usage}% >= {CLOUD_FALLBACK_THRESHOLD}%")
            return True

        # Critically low available memory.
        if available_memory < 500:  # less than 500MB free
            logger.warning(f"触发云端fallback: 可用内存极低 {available_memory}MB")
            return True

        # Machine too small for local inference altogether.
        if total_memory < 6000:  # less than 6GB total
            logger.warning(f"触发云端fallback: 系统内存过小 {total_memory}MB")
            return True

        # A large model is loaded and memory is still tight.
        large_models = [model for model, info in MODEL_RESOURCE_CONFIG.items()
                       if info.get("type") == "large" and model in loaded_models]

        if large_models and memory_usage > 75:
            logger.warning(f"触发云端fallback: 已加载大模型且内存使用率仍高 ({memory_usage}%)")
            return True

        return False

    async def get_available_memory_mb(self) -> int:
        """Return the current available memory in MB (refreshed on demand)."""
        async with self._lock:
            # Take a fresh sample.
            memory = psutil.virtual_memory()
            self._available_memory_mb = int(memory.available / 1024 / 1024)
            return self._available_memory_mb

    async def get_total_memory_mb(self) -> int:
        """Return total system memory in MB (set by the monitor loop)."""
        return self._total_memory_mb

    async def can_load_model(self, model_name: str) -> bool:
        """Check whether a model can be loaded within the current memory budget."""
        # Look up the model's resource profile.
        model_info = get_model_resource_info(model_name)
        if not model_info:
            logger.warning(f"未知模型资源信息: {model_name}")
            return False

        estimated_memory = model_info.get("memory_mb", 0)
        available_memory = await self.get_available_memory_mb()
        model_type = model_info.get("type", "medium")

        # Snapshot loaded-model state under the lock (originally read unlocked).
        async with self._lock:
            loaded_models = dict(self._model_memory_usage)
        current_model_memory = sum(loaded_models.values())

        # Safety margin depends on model class and current memory state.
        if model_type == "large":
            # Large models get the most conservative margin.
            safety_margin = 0.6  # use at most 60% of available memory
            # Refuse a second concurrent large model.
            existing_large_models = [m for m, info in MODEL_RESOURCE_CONFIG.items()
                                   if info.get("type") == "large" and m in loaded_models]
            if existing_large_models:
                logger.warning(f"已有大模型加载: {existing_large_models}, 不建议再加载大模型")
                return False
        elif model_type == "medium":
            safety_margin = 0.7
        else:  # small
            safety_margin = 0.8

        # Memory that can safely be consumed by the new model.
        safe_available = available_memory * safety_margin

        # Projected total footprint after loading.
        estimated_total = current_model_memory + estimated_memory

        # Cap total model memory at 70% of system memory.
        system_limit = self._total_memory_mb * 0.7

        # All conditions must hold, including keeping >= 500MB free afterwards.
        can_load = (estimated_memory <= safe_available and
                   estimated_total <= system_limit and
                   (available_memory - estimated_memory) >= 500)

        logger.debug(
            f"模型加载检查: {model_name} ({estimated_memory}MB, {model_type}) - "
            f"可用: {available_memory}MB, 安全可用: {int(safe_available)}MB, "
            f"当前模型占用: {int(current_model_memory)}MB, 预计总占用: {int(estimated_total)}MB, "
            f"系统限制: {int(system_limit)}MB, 结果: {can_load}"
        )

        return can_load

    async def get_optimized_model_config(self, model_type: Optional[str] = None) -> Dict[str, Any]:
        """Return a model config optimized for the current resource state."""
        # Pick the recommended model (optionally constrained to a size class).
        recommended_model = await self.get_recommended_model_by_type(model_type)

        # Attach the model's resource profile when known.
        model_info = get_model_resource_info(recommended_model)
        if not model_info:
            return {"model_name": recommended_model}

        config = {
            "model_name": recommended_model,
            "memory_mb": model_info.get("memory_mb", 0),
            "type": model_info.get("type", "medium"),
            "recommended": True,
            "optimization": {}
        }

        # Under memory pressure, enable low-resource generation settings.
        memory_usage = await self.get_memory_usage()
        if memory_usage > 70:
            config["optimization"]["low_resource_mode"] = True
            config["optimization"]["max_tokens"] = 512  # cap generation length

        return config

    async def get_recommended_model_by_type(self, model_type: Optional[str] = None) -> str:
        """Recommend a concrete model for a size class, degrading gracefully."""
        # Derive a size class from resource state when none is given.
        if not model_type:
            model_type = await self.get_recommended_model_size()

        # Candidates of the requested size class.
        recommended_models = get_recommended_models_by_size(model_type)

        # First loadable candidate wins.
        for model in recommended_models:
            if await self.can_load_model(model):
                logger.info(f"推荐模型: {model} (类型: {model_type})")
                return model

        # Nothing loadable: degrade to the next smaller size class.
        if model_type == "large":
            logger.warning("无法加载推荐的大模型，尝试降级到中模型")
            return await self.get_recommended_model_by_type("medium")
        elif model_type == "medium":
            logger.warning("无法加载推荐的中模型，尝试降级到小模型")
            return await self.get_recommended_model_by_type("small")
        else:  # small
            # Even small models failed the check — return a best-effort default.
            return recommended_models[0] if recommended_models else "phi3:3.8b-instruct-q4_K_M"

    async def get_model_switch_reason(self) -> str:
        """Return a human-readable reason for the current switch decision."""
        async with self._lock:
            return self._describe_switch_reason(
                self._memory_usage, self._available_memory_mb, self._high_memory_duration
            )


# Module-level singleton monitor instance shared by the whole process.
resource_monitor = ResourceMonitor()


def get_resource_monitor() -> ResourceMonitor:
    """Return the process-wide ResourceMonitor singleton."""
    return resource_monitor


# Model resource profiles: estimated memory footprint (MB), CPU core needs,
# size class, and whether the model is preferred for automatic selection.
MODEL_RESOURCE_CONFIG = {
    # Small quantized models (low memory consumption)
    "phi3:3.8b-instruct-q4_K_M": {"memory_mb": 1800, "cpu_cores": 2, "type": "small", "recommended": True},
    "gemma:2b-instruct-q4_K_M": {"memory_mb": 1200, "cpu_cores": 1, "type": "small", "recommended": True},
    "llama3:8b-instruct-q4_K_M": {"memory_mb": 4200, "cpu_cores": 4, "type": "medium", "recommended": True},
    "llama3:8b-instruct-q5_K_M": {"memory_mb": 4800, "cpu_cores": 4, "type": "medium", "recommended": False},
    
    # Medium quantized models (balance of performance and resources)
    "qwen2:7b-instruct-q4_K_M": {"memory_mb": 3200, "cpu_cores": 4, "type": "medium", "recommended": True},
    "mistral:7b-instruct-v0.2-q4_K_M": {"memory_mb": 3400, "cpu_cores": 4, "type": "medium", "recommended": True},
    "qwen2:7b-instruct-q5_K_M": {"memory_mb": 3800, "cpu_cores": 4, "type": "medium", "recommended": False},
    "gemma:7b-instruct-q4_K_M": {"memory_mb": 3500, "cpu_cores": 4, "type": "medium", "recommended": True},
    
    # Large models (high performance, high memory consumption)
    "llama3": {"memory_mb": 4660, "cpu_cores": 8, "type": "large", "recommended": True},
    "llama3:8b": {"memory_mb": 4660, "cpu_cores": 8, "type": "large", "recommended": True},
    "gemma:7b-instruct": {"memory_mb": 3800, "cpu_cores": 8, "type": "large", "recommended": True},
    "qwen2:14b-instruct-q4_K_M": {"memory_mb": 6500, "cpu_cores": 8, "type": "large", "recommended": False},
    
    # Compatibility entries and fallbacks for extreme resource limits
    "phi3:3.8b": {"memory_mb": 2200, "cpu_cores": 2, "type": "small", "recommended": False},
    "phi3:mini-instruct": {"memory_mb": 1000, "cpu_cores": 1, "type": "small", "recommended": True},  # tiny model for extreme memory pressure
}


def get_model_resource_info(model_name: str) -> Optional[Dict[str, Any]]:
    """Return the resource profile for *model_name*, or None if unknown.

    "llama3" falls back to the "llama3:8b" entry when it has no entry of its
    own. (The original alias condition was dead code whenever a "llama3" key
    exists in MODEL_RESOURCE_CONFIG; this form preserves the same observable
    behavior in both cases while making the intent explicit.)
    """
    info = MODEL_RESOURCE_CONFIG.get(model_name)
    if info is None and model_name == "llama3":
        # Alias: bare "llama3" maps to the fully-qualified 8B entry.
        info = MODEL_RESOURCE_CONFIG.get("llama3:8b")
    return info

def get_recommended_models_by_size(size: str) -> List[str]:
    """Return recommended model names for a size class ("small"/"medium"/"large").

    Prefers the curated RECOMMENDED_MODELS mapping; otherwise filters
    MODEL_RESOURCE_CONFIG for recommended entries of that size, and finally
    falls back to hard-coded defaults so callers always get at least one model.
    """
    # Curated list from constants takes precedence.
    if size in RECOMMENDED_MODELS:
        return RECOMMENDED_MODELS[size]

    # Fallback: select recommended entries of the requested size class.
    candidates = [
        name
        for name, info in MODEL_RESOURCE_CONFIG.items()
        if info.get("recommended", False) and info.get("type") == size
    ]
    if candidates:
        return candidates

    # Last resort: one known-good model per size class.
    if size == "small":
        return ["phi3:3.8b-instruct-q4_K_M"]
    if size == "medium":
        return ["qwen2:7b-instruct-q4_K_M"]
    return ["llama3:8b"]

def estimate_model_performance(model_name: str) -> float:
    """Estimate a rough performance score (0-100) for a model.

    Scoring: a base score by size class (small=70, medium=85, large=95),
    reduced slightly for heavier quantization (q4 loses more than q5).
    Unknown models get a neutral 50.

    Fix: removed the unused local ``memory_mb`` present in the original.
    """
    info = get_model_resource_info(model_name)
    if not info:
        return 50  # unknown model: assume middling performance

    model_type = info.get("type", "medium")

    # Base score by size class.
    if model_type == "small":
        base_score = 70
    elif model_type == "medium":
        base_score = 85
    else:
        base_score = 95

    # Quantization penalty: q4 is lossier than q5.
    lowered = model_name.lower()
    if "q4" in lowered:
        base_score -= 5
    elif "q5" in lowered:
        base_score -= 2

    return min(max(base_score, 0), 100)  # clamp to [0, 100]


async def init_resource_monitoring():
    """Start the global resource monitor and log the initial resource state."""
    monitor = get_resource_monitor()
    await monitor.start()

    # Seed the total-memory figure (the monitor thread also sets this).
    vm = psutil.virtual_memory()
    monitor._total_memory_mb = int(vm.total / 1024 / 1024)

    logger.info(f"资源监控模块初始化完成，系统总内存: {monitor._total_memory_mb}MB")
    logger.info(f"模型切换阈值: {MODEL_SWITCH_THRESHOLD}%")

    # Inspect the starting resource situation and warn if already tight.
    memory_usage = await monitor.get_memory_usage()
    available_memory = await monitor.get_available_memory_mb()

    if memory_usage >= 80:
        logger.warning(f"系统内存使用率较高: {memory_usage}%，可用内存: {available_memory}MB，建议注意模型选择")


async def close_resource_monitoring():
    """Stop the global resource monitor and log the shutdown."""
    await get_resource_monitor().stop()
    logger.info("资源监控模块已关闭")


# Shutdown handling: ensure the monitor thread is stopped when the process exits
import atexit

def _ensure_resource_monitor_stopped():
    """atexit hook: stop the resource monitor if it is still running.

    Fixes vs. the original: the redundant function-local ``import asyncio``
    (asyncio is already imported at module level) is gone, and ``asyncio.run``
    replaces the manual new_event_loop/run_until_complete/close sequence —
    the old code leaked the loop when ``run_until_complete`` raised, since
    ``loop.close()`` was not in a ``finally`` block.
    """
    try:
        monitor = get_resource_monitor()
        if monitor._is_running:
            # A fresh event loop is needed: atexit runs after any application
            # loop has already shut down. asyncio.run creates one and always
            # closes it, even on error.
            asyncio.run(close_resource_monitoring())
    except Exception as e:
        logger.error(f"关闭资源监控时出错: {str(e)}")

# Register the cleanup hook so the monitor is stopped on interpreter exit.
atexit.register(_ensure_resource_monitor_stopped)