"""
智能路由系统
基于请求特征、模型能力、系统负载和性能指标实现智能路由
"""

from typing import Optional, Dict, Any, List, Tuple
from dataclasses import dataclass, field
import asyncio
import re
import json
from collections import defaultdict, deque
from datetime import datetime, timedelta

from utils.logger import Logger
from utils.constants import ErrorCode, TaskPriority, MODEL_CAPABILITIES
from middleware.model_adapter import get_model_adapter, ModelAdapter
from middleware.model_client import ModelType, ModelClientFactory, BaseModelClient
from app.core.resource_scheduler import get_resource_scheduler
from app.core.resource_monitor import get_resource_monitor
from middleware.redis_adapter import get_redis_adapter
from config.model_config import model_config_manager, ModelBackend

logger = Logger.get_logger(agent_name="smart_router")


@dataclass
class RequestContext:
    """
    Per-request context assembled by ``SmartRouter.analyze_request``.

    Carries the raw query plus the features extracted from it (request
    type, required capabilities) that routing decisions are based on.
    """
    request_id: str
    query: str
    request_type: str  # e.g. 'text_qa', 'document_qa', 'multi_modal', 'tool_call'
    user_id: Optional[str] = None
    priority: TaskPriority = TaskPriority.MEDIUM
    timeout: float = 30.0  # seconds; default 30s
    required_capabilities: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)
    created_at: datetime = field(default_factory=datetime.now)

@dataclass
class RoutingDecision:
    """
    Outcome of a routing decision: which backend/model should serve the
    request, how confident the router is, and the reasoning trail.
    """
    model_backend: str  # 'ollama', 'cloud' or 'fallback' (see SmartRouter routing paths)
    model_name: str
    is_cloud: bool = False
    confidence: float = 0.0
    estimated_time: float = 0.0  # estimated response time in seconds
    reasoning: List[str] = field(default_factory=list)

class SmartRouter:
    """
    Model-aware smart routing system.

    Analyzes incoming requests, scores candidate models against system
    resources and historical performance, and dispatches to a local model,
    a cloud provider, or a fallback backend. Obtain the shared instance via
    ``await SmartRouter.get_instance()``.
    """
    
    # Process-wide singleton, guarded by _lock inside get_instance().
    # NOTE(review): asyncio.Lock() is created at import time, outside any
    # running loop — fine on modern Python, but confirm the target version.
    _instance: Optional['SmartRouter'] = None
    _lock = asyncio.Lock()
    
    def __init__(self):
        """
        Build the router's collaborators, caches, statistics, and the
        pattern/weight tables used for request classification and scoring.
        """
        self.model_adapter = get_model_adapter()
        self.resource_monitor = get_resource_monitor()
        self.resource_scheduler = get_resource_scheduler()
        self.redis_client = get_redis_adapter()
        self.model_client_factory = ModelClientFactory()
        
        # Model -> capability list, seeded from MODEL_CAPABILITIES and
        # refreshed from Redis (see _load_model_capabilities).
        self.model_capabilities_cache: Dict[str, List[str]] = {}
        self.capabilities_last_update: Dict[str, datetime] = {}
        
        # Routing counters keyed by "model:request_type"
        # (see _update_routing_statistics).
        self.routing_statistics: Dict[str, Dict[str, int]] = defaultdict(lambda: {
            'total': 0, 'success': 0, 'failure': 0, 'timeout': 0
        })
        
        # Recent response-time samples per model, used by
        # _estimate_response_time; maxlen keeps only the latest 100.
        self.response_time_history: Dict[str, deque] = defaultdict(lambda: deque(maxlen=100))
        
        # Request-type detection rules: (compiled pattern, weight) pairs,
        # matched against the lower-cased query in _detect_request_type.
        self.request_patterns = {
            'text_qa': [
                (re.compile(r'问题|请问|如何|为什么|什么|解释|说明'), 0.8),
                (re.compile(r'^\w+\?$'), 0.9),
            ],
            'document_qa': [
                (re.compile(r'基于文档|根据上文|文档中|在资料中|参考'), 0.9),
            ],
            'multi_modal': [
                (re.compile(r'图片|图像|截图|照片|查看|识别|描述'), 0.8),
            ],
            'tool_call': [
                (re.compile(r'搜索|查找|查询|计算|转换|执行|调用'), 0.8),
            ],
            'code_generation': [
                (re.compile(r'编写|生成|代码|函数|实现|算法'), 0.85),
            ],
            'summarization': [
                (re.compile(r'总结|概括|摘要|要点|提炼'), 0.9),
            ],
        }
        
        # Weights combined in _get_model_performance_score.
        self.performance_weights = {
            'success_rate': 0.4,   # weight of the success rate
            'response_time': 0.3,  # weight of the (inverted) response-time score
            'resource_efficiency': 0.2,  # weight of resource efficiency
            'capability_match': 0.1,  # NOTE: not referenced by _get_model_performance_score
        }
        
        logger.info("智能路由系统初始化完成")
    
    @classmethod
    async def get_instance(cls) -> 'SmartRouter':
        """
        Return the process-wide router instance (async singleton).

        Creation and initialization happen under ``cls._lock`` so that
        concurrent first callers cannot build two instances.
        """
        async with cls._lock:
            if cls._instance is None:
                # Use cls() rather than a hard-coded SmartRouter() so a
                # subclass calling get_instance() gets an instance of itself.
                cls._instance = cls()
                await cls._instance.initialize()
            return cls._instance
    
    async def initialize(self):
        """
        Load capability/performance data and start the background refresher.

        Called once by ``get_instance`` right after construction.
        """
        # Load model capability information
        await self._load_model_capabilities()
        # Load historical performance data
        await self._load_performance_history()
        # Keep a strong reference to the background task: the event loop only
        # holds weak references to tasks, so a fire-and-forget task can be
        # garbage-collected before it finishes.
        self._update_task = asyncio.create_task(self._periodic_update())
        
        logger.info("智能路由系统初始化完成")
    
    async def _load_model_capabilities(self):
        """
        Seed the capability cache from the static MODEL_CAPABILITIES table,
        then overlay any per-model entries found in Redis.
        """
        # Static defaults shipped with the code.
        for model_name, capabilities in MODEL_CAPABILITIES.items():
            self.model_capabilities_cache[model_name] = capabilities
            self.capabilities_last_update[model_name] = datetime.now()
        
        # Best effort: Redis entries override the static defaults.
        # NOTE(review): keys()/get() are called without await here, while
        # _cache_performance_metrics awaits hget on the same adapter —
        # confirm which Redis adapter calls are coroutines.
        try:
            keys = self.redis_client.keys("model_capabilities:*")
            for key in keys:
                # Assumes keys come back as bytes ("model_capabilities:<name>") — TODO confirm.
                model_name = key.decode('utf-8').split(":")[1]
                capabilities_json = self.redis_client.get(key)
                if capabilities_json:
                    capabilities = json.loads(capabilities_json)
                    self.model_capabilities_cache[model_name] = capabilities
                    self.capabilities_last_update[model_name] = datetime.now()
        except Exception as e:
            logger.error(f"从Redis加载模型能力失败: {str(e)}")
    
    async def _load_performance_history(self):
        """
        Seed the in-memory response-time history from the per-model
        performance hashes stored in Redis (best effort).
        """
        try:
            for raw_key in self.redis_client.keys("model_performance:*"):
                name = raw_key.decode('utf-8').split(":")[1]
                fields = self.redis_client.hgetall(raw_key)
                if fields and b'response_time' in fields:
                    self.response_time_history[name].append(float(fields[b'response_time']))
        except Exception as e:
            logger.error(f"加载性能历史失败: {str(e)}")
    
    async def _periodic_update(self):
        """
        Background loop: refresh capability and performance data every five
        minutes, logging (but surviving) any refresh failure.
        """
        interval_seconds = 300
        while True:
            try:
                await self._update_model_capabilities()
                await self._update_performance_metrics()
            except Exception as exc:
                logger.error(f"定期更新失败: {str(exc)}")
            
            await asyncio.sleep(interval_seconds)
    
    async def _update_model_capabilities(self):
        """
        Refresh cached capability lists that are more than one hour old,
        pulling the latest entry for each stale model from Redis.
        """
        now = datetime.now()
        
        # Iterate over a snapshot of the keys so concurrent inserts are safe.
        for model_name in list(self.model_capabilities_cache.keys()):
            # Models with no recorded update are treated as exactly one hour
            # old (and therefore not refreshed until they age past the limit).
            last_update = self.capabilities_last_update.get(model_name, now - timedelta(hours=1))
            if (now - last_update).total_seconds() <= 3600:
                continue
            
            try:
                capabilities_json = self.redis_client.get(f"model_capabilities:{model_name}")
                if capabilities_json:
                    self.model_capabilities_cache[model_name] = json.loads(capabilities_json)
                    self.capabilities_last_update[model_name] = now
            except Exception as e:
                logger.error(f"更新模型{model_name}能力失败: {str(e)}")
    
    async def _update_performance_metrics(self):
        """
        Pull the latest per-model response-time samples from Redis into the
        in-memory history used for response-time estimation.
        """
        try:
            keys = self.redis_client.keys("model_performance:*")
            for key in keys:
                model_name = key.decode('utf-8').split(":")[1]
                metrics = self.redis_client.hgetall(key)
                
                if metrics and b'response_time' in metrics:
                    rt = float(metrics[b'response_time'])
                    # The history deques are created with maxlen=100, which
                    # discards the oldest sample automatically — the previous
                    # manual `popleft()` trim was dead code.
                    self.response_time_history[model_name].append(rt)
        except Exception as e:
            logger.error(f"更新性能指标失败: {str(e)}")
    
    def analyze_request(self, query: str, **kwargs) -> RequestContext:
        """
        Analyze a raw query and build the routing context: detect the
        request type and derive the capabilities it requires.
        """
        req_id = kwargs.get('request_id', f"req_{datetime.now().timestamp()}")
        
        detected_type = self._detect_request_type(query)
        capabilities = self._extract_required_capabilities(query, detected_type)
        
        ctx = RequestContext(
            request_id=req_id,
            query=query,
            request_type=detected_type,
            user_id=kwargs.get('user_id'),
            priority=kwargs.get('priority', TaskPriority.MEDIUM),
            timeout=kwargs.get('timeout', 30.0),
            required_capabilities=capabilities,
            metadata=kwargs.get('metadata', {})
        )
        
        logger.debug(f"请求分析完成: {req_id} - 类型: {detected_type}, 能力需求: {capabilities}")
        return ctx
    
    def _detect_request_type(self, query: str) -> str:
        """
        Classify the query by accumulating the weights of every matching
        pattern; defaults to 'text_qa' when no rule fires.
        """
        lowered = query.lower()
        scores: Dict[str, float] = defaultdict(float)
        
        for req_type, rules in self.request_patterns.items():
            for pattern, weight in rules:
                if pattern.search(lowered):
                    scores[req_type] += weight
        
        if not scores:
            return 'text_qa'  # nothing matched: assume plain text Q&A
        return max(scores, key=scores.get)
    
    def _extract_required_capabilities(self, query: str, request_type: str) -> List[str]:
        """
        Derive the capability tags a request needs from its detected type
        plus keyword hints found in the query text. Result is deduplicated.
        """
        # Base capabilities implied by the request type.
        base_capabilities = {
            'text_qa': ['general_knowledge'],
            'document_qa': ['reading_comprehension', 'information_extraction'],
            'multi_modal': ['image_understanding', 'visual_analysis'],
            'tool_call': ['tool_usage', 'reasoning'],
            'code_generation': ['coding', 'programming'],
            'summarization': ['summarization', 'extractive_summarization'],
        }
        capabilities = list(base_capabilities.get(request_type, []))
        
        # Keyword-triggered extras, matched against the lower-cased query.
        keyword_rules = [
            (('数学', '计算', '统计', '数字'), 'math'),
            (('代码', '编程', '函数', '算法'), 'coding'),
            (('中文', '汉字', '成语', '诗词'), 'chinese_language'),
            (('翻译', 'translat'), 'translation'),
            (('创意', '写作', '故事', '诗歌'), 'creative_writing'),
        ]
        query_lower = query.lower()
        for keywords, capability in keyword_rules:
            if any(word in query_lower for word in keywords):
                capabilities.append(capability)
        
        return list(set(capabilities))  # deduplicate
    
    async def make_routing_decision(self, context: RequestContext) -> RoutingDecision:
        """
        Decide which backend/model should serve *context*.

        Order of preference: cloud (when local resources are short), then
        the best-scoring local model of the recommended size class, then
        cloud again when no local model is available or none matches.
        """
        reasoning = []
        
        # Resource-pressure check: offload to the cloud when local capacity is short.
        use_cloud = await self.model_adapter.should_use_cloud_service()
        if use_cloud:
            reasoning.append("系统资源不足，使用云端服务以保证性能")
            return await self._route_to_cloud(context, reasoning)
        
        # Ask the resource monitor which model size class fits the current load.
        recommended_size = await self.resource_monitor.get_recommended_model_size()
        reasoning.append(f"系统资源状况推荐使用{recommended_size}模型")
        
        # Candidate local models of that size.
        available_models = await self._get_available_models(recommended_size)
        if not available_models:
            reasoning.append("没有可用的本地模型，切换到云端服务")
            return await self._route_to_cloud(context, reasoning)
        
        # Score candidates on capability match, performance, resource fit, reliability.
        scored_models = await self._score_models(available_models, context)
        
        if not scored_models:
            reasoning.append("没有合适的模型匹配请求，切换到云端服务")
            return await self._route_to_cloud(context, reasoning)
        
        # Pick the highest-scoring model.
        best_model, best_score = max(scored_models, key=lambda x: x[1])
        reasoning.append(f"模型{best_model}评分最高 ({best_score:.2f})，选为最佳模型")
        
        # Backend comes from the model's resource info, defaulting to 'ollama'.
        model_info = get_model_resource_info(best_model)
        backend = model_info.get('backend', 'ollama') if model_info else 'ollama'
        
        # Latency estimate from the recent response-time history.
        estimated_time = self._estimate_response_time(best_model)
        reasoning.append(f"预计响应时间: {estimated_time:.2f}秒")
        
        decision = RoutingDecision(
            model_backend=backend,
            model_name=best_model,
            is_cloud=False,
            confidence=min(best_score, 1.0),
            estimated_time=estimated_time,
            reasoning=reasoning
        )
        
        logger.info(f"路由决策: 请求{context.request_id} -> 模型{best_model} (评分: {best_score:.2f})")
        return decision
    
    async def _route_to_cloud(self, context: RequestContext, reasoning: List[str]) -> RoutingDecision:
        """
        Build a decision that sends the request to a cloud provider, or
        degrade to a local fallback model when no cloud client is available.
        """
        # Imported lazily; presumably avoids an import cycle at module load — confirm.
        from middleware.cloud_llm_client import get_cloud_llm_manager
        cloud_manager = get_cloud_llm_manager()
        cloud_client = await cloud_manager.get_fallback_client()
        
        if not cloud_client:
            # No cloud provider reachable: degrade to the smallest local model.
            reasoning.append("云端服务不可用，尝试使用本地最小模型")
            return await self._route_to_fallback_model(context, reasoning)
        
        provider_name = cloud_client.provider_name
        model_name = cloud_client.model_name
        reasoning.append(f"使用云端服务 {provider_name} - {model_name}")
        
        decision = RoutingDecision(
            model_backend='cloud',
            model_name=model_name,
            is_cloud=True,
            confidence=0.8,
            estimated_time=1.5,  # rough default; cloud round-trips tend to be slower
            reasoning=reasoning
        )
        
        logger.info(f"云端路由决策: 请求{context.request_id} -> {provider_name} - {model_name}")
        return decision
    
    async def _route_to_fallback_model(self, context: RequestContext, reasoning: List[str]) -> RoutingDecision:
        """
        Last-resort routing: the smallest available local model, or the
        generic fallback backend when nothing at all can be loaded.
        """
        small_models = await self._get_available_models('small')
        
        if not small_models:
            # Worst case: hand the request to the fallback mechanism.
            reasoning.append("所有模型不可用，使用fallback机制")
            return RoutingDecision(
                model_backend='fallback',
                model_name='fallback',
                is_cloud=False,
                confidence=0.3,
                estimated_time=5.0,
                reasoning=reasoning
            )
        
        degraded = small_models[0]
        reasoning.append(f"使用降级模型: {degraded}")
        
        return RoutingDecision(
            model_backend='ollama',
            model_name=degraded,
            is_cloud=False,
            confidence=0.6,
            estimated_time=self._estimate_response_time(degraded),
            reasoning=reasoning
        )
    
    async def _get_available_models(self, size: str) -> List[str]:
        """
        List loadable models of the given size class ('small'/'medium'/'large'),
        falling back to hard-coded defaults if the resource monitor fails.
        """
        try:
            candidates = await self.resource_monitor.get_recommended_models_by_size(size)
            # Keep only the candidates the monitor says can actually be loaded.
            return [name for name in candidates if await self.resource_monitor.can_load_model(name)]
        except Exception as e:
            logger.error(f"获取可用模型失败: {str(e)}")
            defaults = {
                'small': ['phi3:3.8b-instruct-q4_K_M'],
                'medium': ['qwen2:7b-instruct-q4_K_M'],
                'large': ['llama3'],
            }
            return defaults.get(size, ['phi3:3.8b-instruct-q4_K_M'])
    
    async def _score_models(self, models: List[str], context: RequestContext) -> List[Tuple[str, float]]:
        """
        Score each candidate model for this request.

        The score is a fixed-weight blend of capability match (0.3),
        historical performance (0.3), resource fit (0.3) and reliability
        (0.1). Candidates whose scoring raises are skipped.
        """
        ranked: List[Tuple[str, float]] = []
        
        for candidate in models:
            try:
                capability = await self._calculate_capability_match(candidate, context.required_capabilities)
                performance = await self._get_model_performance_score(candidate)
                resource_fit = await self._calculate_resource_fit(candidate)
                reliability = await self._get_model_reliability_score(candidate)
                
                blended = (
                    0.3 * capability +
                    0.3 * performance +
                    0.3 * resource_fit +
                    0.1 * reliability
                )
                ranked.append((candidate, blended))
                
            except Exception as e:
                logger.error(f"为模型{candidate}评分失败: {str(e)}")
        
        return ranked
    
    async def _calculate_capability_match(self, model_name: str, required_capabilities: List[str]) -> float:
        """
        Fraction of the required capabilities that the model advertises.
        Returns neutral defaults when either side is unknown or empty.
        """
        known = self.model_capabilities_cache.get(model_name, [])
        
        if not required_capabilities:
            return 0.5  # nothing specific required: neutral match
        if not known:
            return 0.3  # unknown model capabilities: pessimistic match
        
        available = set(known)
        matched = sum(1 for capability in required_capabilities if capability in available)
        return matched / len(required_capabilities)
    
    async def _get_model_performance_score(self, model_name: str) -> float:
        """
        Score a model in [0, 1] from its cached performance metrics in Redis.

        Combines success rate, a normalized (inverted) response-time score,
        and resource efficiency using ``self.performance_weights``. Returns
        a neutral 0.5 when no metrics exist or anything fails.
        """
        try:
            key = f"model_performance:{model_name}"
            metrics = self.redis_client.hgetall(key)
            
            if not metrics:
                return 0.5  # no data yet: neutral score
            
            # NOTE(review): assumes hgetall returns a bytes-keyed mapping —
            # confirm against the Redis adapter's decode settings.
            success_rate = float(metrics.get(b'success_rate', 0.5))
            avg_response_time = float(metrics.get(b'avg_response_time', 2.0))
            resource_efficiency = float(metrics.get(b'resource_efficiency', 0.7))
            
            # Map response time into [0, 1], faster is better:
            # ~0.5s scores near 1, 10s or more scores 0.
            rt_score = max(0, min(1, (10 - avg_response_time) / 9.5))
            
            # Weighted combination (the 'capability_match' weight is not used
            # here; capability match is scored separately in _score_models).
            score = (
                self.performance_weights['success_rate'] * success_rate +
                self.performance_weights['response_time'] * rt_score +
                self.performance_weights['resource_efficiency'] * resource_efficiency
            )
            
            return score
        except Exception as e:
            logger.error(f"获取模型{model_name}性能评分失败: {str(e)}")
            return 0.5
    
    async def _calculate_resource_fit(self, model_name: str) -> float:
        """
        Score in [0, 1] how comfortably the model fits current system resources.

        0.0 when the model cannot be loaded at all; otherwise the average of
        a memory-headroom score and a CPU-headroom score, where ~50 points of
        headroom maps to a full score. Returns 0.5 on errors or unknown models.
        """
        try:
            # Hard gate: the model must be loadable at all.
            can_load = await self.resource_monitor.can_load_model(model_name)
            if not can_load:
                return 0.0
            
            model_info = get_model_resource_info(model_name)
            if not model_info:
                return 0.5  # unknown footprint: neutral score
            
            # Current utilization; treated as percentages by the math below.
            memory_usage = await self.resource_monitor.get_memory_usage()
            cpu_usage = await self.resource_monitor.get_cpu_usage()
            
            # Remaining headroom after loading the model.
            # NOTE(review): memory_mb/1024 yields GB but is subtracted from a
            # percentage, and cpu_cores*10 assumes 10 points per core —
            # confirm the intended units of these heuristics.
            memory_margin = 100 - memory_usage - (model_info.get('memory_mb', 0) / 1024)
            cpu_margin = 100 - cpu_usage - (model_info.get('cpu_cores', 0) * 10)
            
            # Clamp each headroom score into [0, 1]; 50 points of margin is "ideal".
            memory_fit = max(0, min(1, memory_margin / 50))
            cpu_fit = max(0, min(1, cpu_margin / 50))
            
            return (memory_fit + cpu_fit) / 2
        except Exception as e:
            logger.error(f"计算模型{model_name}资源契合度失败: {str(e)}")
            return 0.5
    
    async def _get_model_reliability_score(self, model_name: str) -> float:
        """
        Read the model's cached success rate from Redis; defaults to an
        optimistic 0.8 when no data exists or the lookup fails.
        """
        try:
            data = self.redis_client.hgetall(f"model_reliability:{model_name}")
            if data and b'success_rate' in data:
                return float(data[b'success_rate'])
            return 0.8  # no track record yet: assume fairly reliable
        except Exception as e:
            logger.error(f"获取模型{model_name}可靠性评分失败: {str(e)}")
            return 0.8
    
    def _estimate_response_time(self, model_name: str) -> float:
        """
        Predict the response time in seconds: the mean of recorded samples,
        or a size-based guess derived from the model name when no history exists.
        """
        samples = self.response_time_history.get(model_name)
        if samples:
            return sum(samples) / len(samples)
        
        # No history yet: guess from size hints embedded in the model name.
        lowered = model_name.lower()
        if 'small' in lowered or '3b' in lowered:
            return 0.8
        if 'medium' in lowered or '7b' in lowered:
            return 1.5
        return 2.5
    
    async def execute_route(self, context: RequestContext, decision: RoutingDecision) -> Dict[str, Any]:
        """
        Execute *decision* for *context* and record the outcome.

        Dispatches to the cloud client, the fallback client, or the local
        model adapter. Statistics, the in-memory response-time history, and
        the Redis performance metrics are always updated, even on failure.
        Failures are reported via ``success=False`` and an ``error`` result.
        """
        start_time = datetime.now()
        success = False
        result = {}
        
        try:
            if decision.is_cloud:
                # Cloud path: resolve the concrete client for the chosen model.
                from middleware.cloud_llm_client import get_cloud_llm_manager
                cloud_manager = get_cloud_llm_manager()
                cloud_client = await cloud_manager.get_client_by_model(decision.model_name)
                
                if cloud_client:
                    result = await cloud_client.generate(context.query)
                    success = True
                else:
                    raise Exception(f"找不到指定的云端模型: {decision.model_name}")
            elif decision.model_backend == 'fallback':
                # Last-resort path when no real model is available.
                from middleware.model_client import FallbackClient
                fallback_client = FallbackClient()
                result = await fallback_client.generate(context.query)
                success = True
            else:
                # Local path via the model adapter.
                # NOTE(review): decision.model_name is not passed here — confirm
                # the adapter actually honors the routed model choice.
                model_adapter = get_model_adapter()
                result = await model_adapter.generate(context.query)
                success = True
                
        except Exception as e:
            logger.error(f"执行路由失败: {str(e)}")
            success = False
            result = {"error": str(e)}
        finally:
            # Bookkeeping runs on both success and failure paths; `elapsed`
            # is bound here and therefore available in the return below.
            elapsed = (datetime.now() - start_time).total_seconds()
            self._update_routing_statistics(decision.model_name, success, elapsed, context.request_type)
            
            # Feed the sample into the estimator's history.
            self.response_time_history[decision.model_name].append(elapsed)
            
            # Persist metrics to Redis (best effort; errors are logged inside).
            await self._cache_performance_metrics(decision.model_name, success, elapsed)
        
        return {
            "result": result,
            "model": decision.model_name,
            "is_cloud": decision.is_cloud,
            "elapsed_time": elapsed,
            "success": success
        }
    
    def _update_routing_statistics(self, model_name: str, success: bool, elapsed: float, request_type: str):
        """
        Bump the in-memory counters for one completed routing attempt,
        keyed by "model:request_type".
        """
        stats = self.routing_statistics[f"{model_name}:{request_type}"]
        
        stats['total'] += 1
        stats['success' if success else 'failure'] += 1
        
        # Requests slower than 30s are additionally counted as timeouts.
        if elapsed > 30.0:
            stats['timeout'] += 1
    
    async def _cache_performance_metrics(self, model_name: str, success: bool, elapsed: float):
        """
        Persist per-model performance metrics (request counters, cumulative
        moving-average response time, success rate) to Redis with a 7-day TTL.

        Bug fix: ``success_rate`` was previously referenced when writing the
        reliability key even on paths that never assigned it — whenever
        ``total_requests`` existed but ``successful_requests`` did not (e.g.
        every request so far had failed), that raised a ``NameError`` which
        the broad except silently logged. An explicit ``None`` sentinel now
        guards that write.
        """
        try:
            performance_key = f"model_performance:{model_name}"
            
            # Queue the counter updates on a pipeline so they apply together.
            pipe = self.redis_client.pipeline()
            pipe.hincrby(performance_key, 'total_requests', 1)
            if success:
                pipe.hincrby(performance_key, 'successful_requests', 1)
            else:
                pipe.hincrby(performance_key, 'failed_requests', 1)
            
            # Cumulative moving average of the response time.
            # NOTE(review): these reads happen before pipe.execute(), so they
            # see the values from before this request's increments.
            current_avg_bytes = await self.redis_client.hget(performance_key, 'avg_response_time')
            current_count_bytes = await self.redis_client.hget(performance_key, 'response_time_count')
            
            if current_avg_bytes and current_count_bytes:
                current_avg = float(current_avg_bytes)
                current_count = int(current_count_bytes)
                new_avg = (current_avg * current_count + elapsed) / (current_count + 1)
                pipe.hset(performance_key, 'avg_response_time', new_avg)
                pipe.hset(performance_key, 'response_time_count', current_count + 1)
            else:
                pipe.hset(performance_key, 'avg_response_time', elapsed)
                pipe.hset(performance_key, 'response_time_count', 1)
            
            # Success rate is only computable once both counters exist.
            success_rate = None
            total = await self.redis_client.hget(performance_key, 'total_requests')
            successful = await self.redis_client.hget(performance_key, 'successful_requests')
            if total and successful:
                success_rate = int(successful) / int(total)
                pipe.hset(performance_key, 'success_rate', success_rate)
            
            # Expire after 7 days.
            pipe.expire(performance_key, 604800)
            
            await pipe.execute()
            
            # Mirror the success rate into the reliability key; default to a
            # perfect rate while no data is available yet.
            reliability_key = f"model_reliability:{model_name}"
            await self.redis_client.hset(
                reliability_key,
                'success_rate',
                success_rate if success_rate is not None else 1.0
            )
            await self.redis_client.expire(reliability_key, 604800)
            
        except Exception as e:
            logger.error(f"缓存性能指标失败: {str(e)}")
    
    async def route_request(self, query: str, **kwargs) -> Dict[str, Any]:
        """
        Main entry point: analyze, decide, and execute a request.

        Args:
            query: The request text.
            **kwargs: Optional request_id, user_id, priority, timeout, metadata.

        Returns:
            The execution result augmented with the decision reasoning and
            the request id.
        """
        ctx = self.analyze_request(query, **kwargs)
        logger.info(f"开始处理请求: {ctx.request_id}, 类型: {ctx.request_type}")
        
        decision = await self.make_routing_decision(ctx)
        outcome = await self.execute_route(ctx, decision)
        
        outcome['decision_reasoning'] = decision.reasoning
        outcome['request_id'] = ctx.request_id
        
        logger.info(f"请求{ctx.request_id}处理完成，耗时: {outcome['elapsed_time']:.2f}秒")
        return outcome
    
    def get_routing_statistics(self) -> Dict[str, Any]:
        """
        Return a snapshot of the per "model:request_type" routing counters.

        The inner counter dicts are copied as well, so callers cannot mutate
        the live statistics through the returned structure (the previous
        shallow ``dict(...)`` shared the mutable inner dicts).
        """
        return {key: dict(counters) for key, counters in self.routing_statistics.items()}
    
    async def update_model_capabilities(self, model_name: str, capabilities: List[str]):
        """
        Record a new capability list for *model_name*, both in the local
        cache and in Redis (24-hour TTL).
        """
        self.model_capabilities_cache[model_name] = capabilities
        self.capabilities_last_update[model_name] = datetime.now()
        
        try:
            self.redis_client.setex(
                f"model_capabilities:{model_name}",
                86400,  # expire after 24 hours
                json.dumps(capabilities)
            )
            logger.info(f"更新模型{model_name}的能力信息: {capabilities}")
        except Exception as e:
            logger.error(f"缓存模型能力失败: {str(e)}")


async def get_smart_router() -> SmartRouter:
    """Return the shared SmartRouter singleton, creating it on first use."""
    router = await SmartRouter.get_instance()
    return router


async def init_smart_router():
    """
    Eagerly create the smart-router singleton at application startup.

    Bug fix: the previous version called ``router.initialize()`` again even
    though ``SmartRouter.get_instance()`` already initializes the instance on
    first creation — the duplicate call re-loaded all capability/performance
    data and spawned a second ``_periodic_update`` background task.
    """
    await get_smart_router()
    logger.info("智能路由系统初始化完成")


# Thin re-export from the resource monitor; imported inside the function,
# presumably to avoid an import cycle at module load time — confirm.
def get_model_resource_info(model_name: str) -> Optional[Dict[str, Any]]:
    """Look up resource requirements for *model_name* via the resource monitor."""
    from app.core.resource_monitor import get_model_resource_info as _lookup
    return _lookup(model_name)
