# utils/online_model_service.py
import asyncio
import json
from os import wait
from typing import Dict, List, Optional, Any
import litellm
from litellm import completion
import time
import logging

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    from config.api_config import get_config_for_service, get_available_services, has_service
except ImportError:
    # 备用导入路径
    import sys
    from pathlib import Path
    sys.path.append(str(Path(__file__).parent.parent))
    from config.api_config import get_config_for_service, get_available_services, has_service

class OnlineModelService:
    """Online model service: access hosted or local LLMs through LiteLLM.

    Configuration is loaded from the project's config manager by service name
    when available; otherwise the explicitly supplied ``api_key`` and
    ``default_model`` are used. Calls support a per-provider fallback model
    chain (activated by passing ``model="fallback"``) and a per-request
    timeout enforced with ``asyncio.wait_for``.
    """

    # Fallback chains tried in order when model == "fallback", keyed by provider.
    _PROVIDER_FALLBACKS: Dict[str, List[str]] = {
        "ollama": ["ollama/qwen3:4b", "ollama/llama3.2", "ollama/mistral", "ollama/phi3", "ollama/gemma2"],
        "openrouter": ["nousresearch/hermes-3-llama-3.1-405b:free", "microsoft/phi-3.5-mini-128k-instruct:free", "google/gemma-2-9b-it"],
        "anthropic": ["claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229"],
    }
    # Used for OpenAI and any provider without a dedicated chain above.
    _DEFAULT_FALLBACKS: List[str] = ["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4", "gpt-4o"]

    def __init__(self, service_name: str = "default", api_key: Optional[str] = None, default_model: str = "gpt-3.5-turbo"):
        """Load the service configuration, falling back to explicit arguments.

        Args:
            service_name: Key looked up via the project's API config manager.
            api_key: Used only when no managed configuration exists.
            default_model: Used only when no managed configuration exists.
        """
        self.service_name = service_name
        self.config = get_config_for_service(service_name)
        if self.config:
            self.api_key = self.config.api_key
            self.default_model = self.config.default_model
            # Pick the fallback chain for this provider (OpenAI-style default).
            self.fallback_models = self._PROVIDER_FALLBACKS.get(
                self.config.provider, self._DEFAULT_FALLBACKS
            )
        else:
            # No managed config: rely entirely on the caller-supplied parameters.
            self.api_key = api_key
            self.default_model = default_model
            self.fallback_models = ["gpt-3.5-turbo", "gpt-4", "claude-3-sonnet-20240229"]

        self.request_timeout = 30  # per-request timeout in seconds
        # Point litellm at a custom endpoint (e.g. a local Ollama server) if configured.
        if self.config and self.config.base_url:
            litellm.api_base = self.config.base_url

    def set_api_key(self, api_key: str):
        """Set the API key on this instance and globally on litellm."""
        self.api_key = api_key
        litellm.api_key = api_key

    def _api_key_missing(self) -> bool:
        """Return True when a key is required but unset (Ollama needs none)."""
        return bool(not self.api_key and self.config and self.config.provider != "ollama")

    async def _try_models(
        self,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: int,
        temperature: float,
        log_note: str = "",
        **kwargs
    ) -> Optional[Dict[str, Any]]:
        """Try each candidate model in turn and return the first success.

        ``model == "fallback"`` tries the whole fallback chain; any other
        value tries just that model. Returns None when every candidate
        times out or raises.
        """
        candidates = self.fallback_models if model == "fallback" else [model]
        for current_model in candidates:
            try:
                logger.info(f"尝试使用模型{log_note}: {current_model}")

                response = await asyncio.wait_for(
                    self._make_completion_call(
                        model=current_model,
                        messages=messages,
                        max_tokens=max_tokens,
                        temperature=temperature,
                        **kwargs
                    ),
                    timeout=self.request_timeout
                )

                return {
                    "model": current_model,
                    "content": response.choices[0].message.content,
                    "usage": getattr(response, "usage", None),
                    "success": True
                }

            except asyncio.TimeoutError:
                logger.warning(f"模型 {current_model} 请求超时")
            except Exception as e:
                logger.warning(f"模型 {current_model} 调用失败: {str(e)}")

        logger.error("所有模型调用均失败")
        return None

    async def call_model(
        self,
        prompt: str,
        model: Optional[str] = None,
        system_message: Optional[str] = None,
        max_tokens: int = 1000,
        temperature: float = 0.7,
        **kwargs
    ) -> Optional[Dict[str, Any]]:
        """Call an online model with a single user prompt.

        Returns a dict with ``model``/``content``/``usage``/``success`` on
        success, or None when the key is missing or all candidates fail.
        """
        if self._api_key_missing():
            logger.warning("API密钥未设置，无法调用在线模型")
            return None

        model = model or self.default_model
        litellm.api_key = self.api_key

        messages = []
        if system_message:
            messages.append({"role": "system", "content": system_message})
        messages.append({"role": "user", "content": prompt})

        return await self._try_models(model, messages, max_tokens, temperature, **kwargs)

    async def _make_completion_call(self, **kwargs):
        """Run the blocking litellm completion in a worker thread.

        ``litellm.completion`` is synchronous; calling it directly from a
        coroutine would block the event loop and make the ``asyncio.wait_for``
        timeout ineffective. ``asyncio.to_thread`` keeps the loop responsive
        and lets the timeout actually fire.
        """
        return await asyncio.to_thread(completion, **kwargs)

    async def call_with_context(
        self,
        prompt: str,
        context: List[Dict[str, str]],  # prior conversation turns
        model: Optional[str] = None,
        system_message: Optional[str] = None,
        max_tokens: int = 1000,
        temperature: float = 0.7
    ) -> Optional[Dict[str, Any]]:
        """Call a model with prior conversation history prepended.

        ``context`` is a list of ``{"role": ..., "content": ...}`` messages
        inserted between the optional system message and the new prompt.
        """
        if self._api_key_missing():
            logger.warning("API密钥未设置，无法调用在线模型")
            return None

        model = model or self.default_model
        litellm.api_key = self.api_key

        messages = []
        if system_message:
            messages.append({"role": "system", "content": system_message})
        # History first, then the new user request.
        messages.extend(context)
        messages.append({"role": "user", "content": prompt})

        return await self._try_models(model, messages, max_tokens, temperature, log_note=" (带上下文)")

    async def batch_call(
        self,
        prompts: List[str],
        model: Optional[str] = None,
        system_message: Optional[str] = None,
        max_tokens: int = 1000,
        temperature: float = 0.7
    ) -> List[Optional[Dict[str, Any]]]:
        """Call the model concurrently for each prompt.

        Results keep prompt order; any prompt whose call raised is mapped
        to None (the exception is logged, not propagated).
        """
        tasks = [
            self.call_model(
                prompt=prompt,
                model=model,
                system_message=system_message,
                max_tokens=max_tokens,
                temperature=temperature
            )
            for prompt in prompts
        ]

        results = await asyncio.gather(*tasks, return_exceptions=True)

        processed_results: List[Optional[Dict[str, Any]]] = []
        for result in results:
            if isinstance(result, Exception):
                logger.error(f"批量调用中的异常: {result}")
                processed_results.append(None)
            else:
                processed_results.append(result)

        return processed_results

# 使用示例和测试函数
async def test_online_model_service():
    """Demo: exercise OnlineModelService with and without conversation context.

    Tries the local Ollama service first; on failure falls back to a default
    OpenAI-style service. Requires a valid API key or a running Ollama server.
    """
    # Prefer the configured local service (Ollama) when available.
    service = OnlineModelService(service_name="ollama")

    # Example: simple single-prompt call.
    result = await service.call_model(
        prompt="请简要介绍一下机器学习的概念",
        system_message="你是一个专业的AI助手，用简洁明了的语言回答问题"
    )

    if result and result["success"]:
        print(f"模型: {result['model']}")
        print(f"回复: {result['content']}")
    else:
        print("调用失败")
        # Ollama unavailable — retry with the default (unconfigured) service.
        service = OnlineModelService(default_model="gpt-3.5-turbo")
        result = await service.call_model(
            prompt="请简要介绍一下机器学习的概念",
            system_message="你是一个专业的AI助手，用简洁明了的语言回答问题"
        )
        if result and result["success"]:
            print(f"模型: {result['model']}")
            print(f"回复: {result['content']}")
        else:
            print("所有服务调用都失败了")

    # Example: call with prior conversation history.
    context = [
        {"role": "user", "content": "我们正在讨论翁法洛斯项目"},
        {"role": "assistant", "content": "翁法洛斯是一个AI智能体训练平台"}
    ]

    context_result = await service.call_with_context(
        prompt="请进一步解释这个项目的架构",
        context=context
    )

    # NOTE: the original `wait(30)` (os.wait) was removed — os.wait() takes no
    # arguments (TypeError) and waits on child processes, not a delay; the
    # context call above is already awaited, so no pause is needed here.

    if context_result and context_result["success"]:
        print(f"带上下文的回复: {context_result['content']}")
    else:
        print("带上下文的调用失败")

    return service

if __name__ == "__main__":
    # NOTE: running this demo requires a valid API key or a reachable Ollama service.
    asyncio.run(test_online_model_service())
