from typing import Optional, List, Dict, Union
from litellm import Router
from litellm.router import CustomRoutingStrategyBase

class ModelPriorityRouter(CustomRoutingStrategyBase):
    """
    Custom routing strategy that picks a deployment by applying, in order:

    1. Message length — long prompts (> 1000 chars) go to a high-capacity model.
    2. Request priority — "high"-priority requests go to a fast model.
    3. Fallback — the first deployment tagged as a general-purpose model.

    Selection reads each deployment's ``model_info`` dict (keys used:
    ``capacity``, ``speed``, ``type``) from the router's ``model_list``.
    """

    def _select_deployment(
        self,
        messages: Optional[List[Dict[str, str]]] = None,
        request_kwargs: Optional[Dict] = None,
    ) -> Optional[Dict]:
        """Shared selection logic for the sync and async entry points.

        Args:
            messages: Chat messages; their total content length drives rule 1.
            request_kwargs: Extra request params; ``priority`` drives rule 2.

        Returns:
            The chosen deployment dict, or ``None`` if there are no deployments.
        """
        # NOTE(review): assumes ``self.router`` is attached to this strategy
        # instance before routing runs — confirm that
        # ``Router.set_custom_routing_strategy`` (or the caller) sets it.
        model_list = self.router.model_list

        # 1. Total character length across all message contents.
        message_length = 0
        if messages:
            message_length = sum(len(str(m.get("content", ""))) for m in messages)

        # 2. Request priority, taken from request_kwargs ("normal" by default).
        priority = request_kwargs.get("priority", "normal") if request_kwargs else "normal"

        # 3. Apply the rules per deployment, in list order.
        for deployment in model_list:
            model_info = deployment.get("model_info", {})

            # Long prompts need a high-capacity model.
            if message_length > 1000 and model_info.get("capacity") == "high":
                return deployment

            # High-priority requests prefer a fast model.
            if priority == "high" and model_info.get("speed") == "fast":
                return deployment

            # Default: any general-purpose model.
            if model_info.get("type") == "general":
                return deployment

        # No rule matched: fall back to the first deployment, if any.
        return model_list[0] if model_list else None

    async def async_get_available_deployment(
        self,
        model: str,
        messages: Optional[List[Dict[str, str]]] = None,
        input: Optional[Union[str, List]] = None,
        specific_deployment: Optional[bool] = False,
        request_kwargs: Optional[Dict] = None,
    ):
        """
        Asynchronously pick an available deployment.

        Args:
            model: Model (group) name being requested.
            messages: Message list used for length-based routing.
            input: Raw input text (unused by the selection rules).
            specific_deployment: Whether a specific deployment is required
                (unused by the selection rules).
            request_kwargs: Extra request parameters; ``priority`` is read.

        Returns:
            The selected deployment dict, or ``None`` if none exist.
        """
        print(f"Processing request for model: {model}")
        return self._select_deployment(messages=messages, request_kwargs=request_kwargs)

    def get_available_deployment(
        self,
        model: str,
        messages: Optional[List[Dict[str, str]]] = None,
        input: Optional[Union[str, List]] = None,
        specific_deployment: Optional[bool] = False,
        request_kwargs: Optional[Dict] = None,
    ):
        """Synchronous version of deployment selection.

        Previously an unimplemented stub that returned ``None``; now applies
        the same rules as the async path via ``_select_deployment``.
        """
        return self._select_deployment(messages=messages, request_kwargs=request_kwargs)

# 初始化路由器
def init_router():
    """Build a Router with two example deployments and attach the
    custom priority-based routing strategy.

    Returns:
        The configured ``Router`` instance.
    """
    # Example deployment entries; ``model_info`` feeds the routing rules.
    gpt4_deployment = {
        "model_name": "gpt-4",
        "litellm_params": {
            "model": "gpt-4",
            "api_key": "your-api-key-1",
        },
        "model_info": {
            "type": "general",
            "capacity": "high",
            "speed": "normal",
        },
    }
    gpt35_deployment = {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": "your-api-key-2",
        },
        "model_info": {
            "type": "general",
            "capacity": "medium",
            "speed": "fast",
        },
    }

    router = Router(
        model_list=[gpt4_deployment, gpt35_deployment],
        set_verbose=True,
        debug_level="DEBUG",
    )

    # Install the custom routing strategy on the router.
    router.set_custom_routing_strategy(ModelPriorityRouter())
    return router

# 使用示例
async def test_router():
    """Demo driver: exercise the router with a short and a long prompt.

    Each scenario is sent as a high-priority request; responses (or errors)
    are printed to stdout.
    """
    router = init_router()

    # Scenarios: a short prompt and a long (> 1000 char) prompt.
    scenarios = [
        [{"role": "user", "content": "Hello!"}],
        [{"role": "user", "content": "Hello! " * 500}],
    ]

    for messages in scenarios:
        try:
            response = await router.acompletion(
                model="gpt-4",
                messages=messages,
                request_kwargs={"priority": "high"},
            )
            print(f"Response: {response}")
            # NOTE(review): reads a private litellm attribute — presumably
            # carries the chosen deployment id; verify against litellm docs.
            print(f"Selected model: {response._hidden_params.get('model_id')}")
        except Exception as e:
            print(f"Error: {e}")

if __name__ == "__main__":
    # Script entry point: run the async demo on a fresh event loop.
    import asyncio

    asyncio.run(test_router())
