from typing import Optional, List, Dict, Union
from litellm.types.router import CustomRoutingStrategyBase
from litellm import Cache, Router
import random
import asyncio
import re
import time
import os
import json
import numpy as np
from openai import OpenAI
from custom_semantic_cache import CustomSemanticCache

class CustomRoutingStrategy(CustomRoutingStrategyBase):
    """Complexity-aware routing strategy for litellm.

    Classifies the incoming question as "simple" or "complex" via weighted
    keyword matching, routes it to the matching deployment group
    ("deepseek-small" / "deepseek-big"), then picks the deployment with the
    best weighted latency / throughput / cost score inside that group.
    """

    def __init__(self):
        super().__init__()
        # Most recently selected model. NOTE(review): never updated within
        # this class — presumably maintained by code outside this chunk.
        self.last_used_model = None
        # litellm Router instance; injected later via set_router().
        self._router = None

        # OpenAI-compatible client configured from environment variables.
        self.client = OpenAI(
            api_key=os.getenv("OPENAI_API_KEY"),
            base_url=os.getenv("OPENAI_API_BASE")
        )

        # Semantic cache used by the routing layer.
        self.router_cache = CustomSemanticCache()

    def set_router(self, router):
        """Attach the Router whose deployments this strategy chooses from."""
        self._router = router

    def analyze_complexity(self, content: str) -> bool:
        """Score *content* against weighted regex patterns.

        Returns True when the accumulated score reaches 8 (complex
        question, route to a big model); False otherwise (simple question,
        a small model is sufficient).
        """
        score = 0

        # High-weight indicators (+3 points each)
        high_weight_patterns = [
            r"分析",
            r"优缺点|优化",
            r"系统|架构",
            r"请详细|详细说明",
            r"\d+\.",  # numbered-list items
            r"对比|比较",
            r"原理|机制|流程"
        ]

        # Medium-weight indicators (+2 points each)
        medium_weight_patterns = [
            r"为什么|如何|怎么",
            r"解释",
            r"学习|应用|实现|设计",
            r"详细|原理|区别|步骤",
            r".{100,}"  # long text
        ]

        # Low-weight indicators (+1 point each)
        low_weight_patterns = [
            r"\?|？",  # question marks
            r"和|与|以及",  # compound questions
            r"建议|推荐",
            r"问题|错误|异常|bug"
        ]

        matched_patterns = []

        # One data-driven pass over all three weight tiers; produces the
        # exact same report strings as the previous three duplicated loops.
        tiers = (
            (high_weight_patterns, 3, "高"),
            (medium_weight_patterns, 2, "中"),
            (low_weight_patterns, 1, "低"),
        )
        for patterns, weight, label in tiers:
            for pattern in patterns:
                matches = re.findall(pattern, content)
                if matches:
                    score += weight
                    matched_patterns.append(
                        f"- {label}权重: 匹配词: {', '.join(matches)} (+{weight}分)"
                    )

        print(f"问题复杂度得分: {score}")
        if matched_patterns:
            print("匹配的模式:")
            for pattern in matched_patterns:
                print(pattern)

        # A total of 8 or more marks the question as complex.
        return score >= 8

    def calculate_model_scores(self, models: List[Dict]) -> Dict[str, float]:
        """Return a score in [0, 1] per model, keyed by litellm model id.

        Combines first-token latency (40%), generation speed (40%) and
        per-token cost (20%), each normalized against an assumed maximum
        (10 s latency, 100 token/s, 0.5 cost per token).
        """
        scores = {}

        for model in models:
            model_info = model.get("model_info", {})
            if not model_info:
                continue

            # Missing metrics default to 0. NOTE(review): a missing
            # first_token_time/cost_per_token then normalizes to the *best*
            # possible sub-score — confirm upstream always populates these.
            first_token_time = float(model_info.get("first_token_time", 0))
            token_per_second = float(model_info.get("token_per_second", 0))
            cost_per_token = float(model_info.get("cost_per_token", 0))

            # Normalize each metric to roughly [0, 1], higher is better.
            normalized_first_token = 1 - (first_token_time / 10)
            normalized_token_speed = token_per_second / 100
            normalized_cost = 1 - (cost_per_token / 0.5)

            # Weighted sum of the normalized metrics.
            total_score = (
                normalized_first_token * 0.4 +
                normalized_token_speed * 0.4 +
                normalized_cost * 0.2
            )

            model_key = model["litellm_params"]["model"]
            # Clamp into [0, 1] in case a metric exceeded its assumed max.
            scores[model_key] = max(0, min(1, total_score))

        return scores

    async def async_get_available_deployment(
        self,
        model: str,
        messages: List[Dict[str, str]],
        **kwargs
    ) -> Dict:
        """Asynchronously pick the best deployment for *messages*.

        Routes complex questions to the "deepseek-big" group and simple
        ones to "deepseek-small" (falling back to the other group when the
        preferred one is empty), then returns the highest-scoring
        deployment configuration in the chosen group.

        Raises:
            ValueError: when no deployment is available, or the selected
                model's configuration cannot be located.
        """
        deployments = self._router.model_list
        if not deployments:
            raise ValueError("没有可用的模型部署")

        # Partition deployments into small / big model groups by name.
        small_deployments = [d for d in deployments if d["model_name"] == "deepseek-small"]
        big_deployments = [d for d in deployments if d["model_name"] == "deepseek-big"]

        print("\n=== 模型部署分组信息 ===")
        print("Small模型组:")
        for d in small_deployments:
            print(f"  - {d['litellm_params']['model']}")
        print("\nBig模型组:")
        for d in big_deployments:
            print(f"  - {d['litellm_params']['model']}")

        if not small_deployments and not big_deployments:
            raise ValueError("没有找到合适的模型部署")

        # Classify the question.
        # NOTE(review): only the FIRST message is analyzed — confirm it is
        # the user prompt and not e.g. a system message.
        is_complex = self.analyze_complexity(messages[0].get("content", "")) if messages else False
        print(f"\n问题类型: {'复杂问题' if is_complex else '简单问题'}")

        # Prefer the group matching the complexity; fall back to the other
        # group if the preferred one has no deployments.
        target_deployments = big_deployments if is_complex else small_deployments

        if not target_deployments:
            target_deployments = small_deployments if is_complex else big_deployments
            if not target_deployments:
                raise ValueError("没有可用的模型部署")
            print(f"警告：没有找到{'复杂' if is_complex else '简单'}问题的专用模型，使用备选模型组")

        print("\n=== 目标模型组得分计算 ===")
        # Score every deployment in the target group.
        deployment_scores = {}
        for deployment in target_deployments:
            model_name = deployment["litellm_params"]["model"]
            print(f"\n评估模型: {model_name}")

            first_token_time = float(deployment["model_info"]["first_token_time"])
            token_speed = float(deployment["model_info"]["token_per_second"])
            cost = float(deployment["model_info"]["cost_per_token"])

            # Every sub-score: higher is better. Guard the reciprocals with
            # an epsilon — a zero latency/cost in the config previously
            # raised ZeroDivisionError here.
            time_score = 1.0 / max(first_token_time, 1e-9)
            speed_score = token_speed / 100.0  # assumed max speed: 100 token/s
            cost_score = 1.0 / max(cost, 1e-9)

            print(f"  响应时间: {first_token_time}s -> 得分: {time_score:.4f}")
            print(f"  生成速度: {token_speed} token/s -> 得分: {speed_score:.4f}")
            print(f"  Token成本: {cost} -> 得分: {cost_score:.4f}")

            # Weighted sum: latency 30%, speed 40%, cost 30%.
            score = (time_score * 0.3) + (speed_score * 0.4) + (cost_score * 0.3)
            deployment_scores[model_name] = score
            print(f"  综合得分: {score:.4f}")

        print("\n=== 最终选择 ===")
        # Report the ranking; sorted() is stable, so ties keep insertion
        # order and the first entry matches what max() would have picked.
        sorted_scores = sorted(deployment_scores.items(), key=lambda x: x[1], reverse=True)
        for model_name, score in sorted_scores:
            print(f"模型: {model_name}, 得分: {score:.4f}")

        # The highest-scoring model wins.
        selected_model = sorted_scores[0][0]
        print(f"\n选定模型: {selected_model}")

        # Return the winner's full deployment configuration.
        for deployment in target_deployments:
            if deployment["litellm_params"]["model"] == selected_model:
                return deployment

        raise ValueError(f"未找到模型 {selected_model} 的配置")

    def get_available_deployment(
        self,
        model: str,
        messages: Optional[List[Dict[str, str]]] = None,
        specific_deployment: Optional[bool] = False,
    ) -> Dict:
        """Synchronous wrapper around async_get_available_deployment.

        Spins up a fresh event loop for the call and always closes it.
        NOTE(review): must not be invoked from inside an already-running
        event loop.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            return loop.run_until_complete(
                self.async_get_available_deployment(
                    model=model,
                    messages=messages,
                    specific_deployment=specific_deployment
                )
            )
        finally:
            loop.close()
