#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
LLM适配器 - 统一的LLM接口包装器
"""

import asyncio
from typing import Dict, List, Any, Optional, Union
import litellm
from litellm import completion, acompletion

from data_engine.utils.logger import get_logger

logger = get_logger(__name__)


class LLMClient:
    """Unified LLM client wrapping LiteLLM's sync and async completion APIs.

    Instance-level defaults (model, api_key, api_base, extra LiteLLM kwargs)
    are merged with per-call overrides on every request.
    """

    def __init__(self,
                 model: str,
                 api_key: str,
                 api_base: Optional[str] = None,
                 **kwargs):
        """
        Initialize the LLM client.

        Args:
            model: Model name in LiteLLM format (e.g. "openai/qwen-turbo").
            api_key: API key for the provider.
            api_base: Optional base URL of the API endpoint.
            **kwargs: Extra LiteLLM parameters (temperature, max_tokens, ...)
                that are forwarded on every completion call.
        """
        self.model = model
        self.api_key = api_key
        self.api_base = api_base
        self.kwargs = kwargs
        
        logger.info(f"LLM客户端初始化完成: model={model}")
    
    def _build_params(self, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
        """Merge instance defaults with per-call overrides into one kwargs dict.

        Precedence: per-call kwargs override instance kwargs, but a configured
        api_base is applied last and therefore always wins (preserves the
        original merge order of the duplicated sync/async code paths).
        """
        params = {
            "model": self.model,
            "messages": messages,
            "api_key": self.api_key,
            **self.kwargs,
            **kwargs
        }
        if self.api_base:
            params["api_base"] = self.api_base
        return params

    async def acompletion(self, messages: List[Dict[str, str]], **kwargs) -> Any:
        """
        Asynchronous LLM call.

        Args:
            messages: List of chat messages ({"role": ..., "content": ...}).
            **kwargs: Per-call LiteLLM parameter overrides.

        Returns:
            The LiteLLM response object.

        Raises:
            Exception: Re-raises whatever LiteLLM raises, after logging it.
        """
        try:
            params = self._build_params(messages, **kwargs)
            
            logger.debug(f"异步调用LLM: model={self.model}, messages_count={len(messages)}")
            
            response = await acompletion(**params)
            
            logger.debug(f"异步LLM调用成功: model={self.model}")
            return response
            
        except Exception as e:
            logger.error(f"异步LLM调用失败: model={self.model}, error={str(e)}")
            raise
    
    def completion(self, messages: List[Dict[str, str]], **kwargs) -> Any:
        """
        Synchronous LLM call.

        Args:
            messages: List of chat messages ({"role": ..., "content": ...}).
            **kwargs: Per-call LiteLLM parameter overrides.

        Returns:
            The LiteLLM response object.

        Raises:
            Exception: Re-raises whatever LiteLLM raises, after logging it.
        """
        try:
            params = self._build_params(messages, **kwargs)
            
            logger.debug(f"调用LLM: model={self.model}, messages_count={len(messages)}")
            
            response = completion(**params)
            
            logger.debug(f"LLM调用成功: model={self.model}")
            return response
            
        except Exception as e:
            logger.error(f"LLM调用失败: model={self.model}, error={str(e)}")
            raise
    
    def get_response_content(self, response: Any) -> str:
        """
        Extract the text content from a LiteLLM response.

        Handles both attribute-style response objects and plain-dict
        responses, in both chat ("message.content") and legacy completion
        ("text") shapes. Falls back to ``str(response)`` when the shape is
        unrecognized or parsing fails.

        Args:
            response: LiteLLM response object or dict.

        Returns:
            The response content string.
            NOTE(review): may return ``None`` if the provider sent a null
            ``content`` field — callers should guard; not changed here to
            avoid breaking callers that test for ``None``.
        """
        try:
            # Attribute-style response object (OpenAI/LiteLLM model objects).
            if hasattr(response, 'choices') and response.choices:
                choice = response.choices[0]
                if hasattr(choice, 'message') and hasattr(choice.message, 'content'):
                    return choice.message.content
                elif hasattr(choice, 'text'):
                    return choice.text
            
            # Plain-dict response shape.
            if isinstance(response, dict):
                if 'choices' in response and response['choices']:
                    choice = response['choices'][0]
                    if 'message' in choice and 'content' in choice['message']:
                        return choice['message']['content']
                    elif 'text' in choice:
                        return choice['text']
            
            logger.warning(f"无法解析LLM响应: {type(response)}")
            return str(response)
            
        except Exception as e:
            logger.error(f"解析LLM响应失败: {str(e)}")
            return str(response)


# Backward-compatible alias for the old class name.
LLMAdapter = LLMClient


def create_qwen_client(api_key: str, model: str = "qwen-turbo", **kwargs) -> LLMClient:
    """
    Build an LLMClient for Alibaba Cloud DashScope (OpenAI-compatible mode).

    Args:
        api_key: DashScope API key.
        model: Model name (default "qwen-turbo").
        **kwargs: Overrides for the default generation settings.

    Returns:
        A configured LLMClient instance.
    """
    settings = {
        "api_base": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "temperature": 0.1,
        "max_tokens": 4000,
    }
    settings.update(kwargs)

    # LiteLLM routes OpenAI-compatible endpoints via the "openai/" prefix.
    qualified = model if model.startswith("openai/") else f"openai/{model}"

    return LLMClient(model=qualified, api_key=api_key, **settings)


def create_openai_client(api_key: str, model: str = "gpt-3.5-turbo", **kwargs) -> LLMClient:
    """
    Build an LLMClient for OpenAI.

    Args:
        api_key: OpenAI API key.
        model: Model name (default "gpt-3.5-turbo").
        **kwargs: Overrides for the default generation settings.

    Returns:
        A configured LLMClient instance.
    """
    settings = {"temperature": 0.1, "max_tokens": 4000}
    settings.update(kwargs)
    return LLMClient(model=model, api_key=api_key, **settings)


def create_claude_client(api_key: str, model: str = "claude-3-haiku-20240307", **kwargs) -> LLMClient:
    """
    Build an LLMClient for Anthropic Claude.

    Args:
        api_key: Anthropic API key.
        model: Model name (default "claude-3-haiku-20240307").
        **kwargs: Overrides for the default generation settings.

    Returns:
        A configured LLMClient instance.
    """
    settings = {"temperature": 0.1, "max_tokens": 4000}
    settings.update(kwargs)
    return LLMClient(model=model, api_key=api_key, **settings)