"""
大语言模型客户端
支持OpenAI API和流式响应
"""
from typing import Optional, List, Dict, Any, AsyncIterator
import time
from openai import AsyncOpenAI, OpenAIError
from loguru import logger
from tenacity import (
    retry,
    stop_after_attempt,
    wait_exponential,
    retry_if_exception_type,
    before_sleep_log
)

from app.config import settings


class LLMClient:
    """Async wrapper around the OpenAI chat-completions API.

    Provides non-streaming chat, streaming chat, and (legacy) function
    calling, with exponential-backoff retries on transient errors, plus
    small token-estimation / history-truncation helpers.
    """

    # Pricing in USD per 1K tokens.
    # NOTE(review): these are gpt-3.5-turbo rates but are applied for
    # whatever model is configured — confirm whether per-model pricing
    # is needed.
    _PROMPT_PRICE_PER_1K = 0.0015
    _COMPLETION_PRICE_PER_1K = 0.002

    def __init__(self):
        """Read model/sampling defaults from settings.

        The API client itself is created later by init(), so LLM features
        stay disabled until init() succeeds.
        """
        self.client: Optional[AsyncOpenAI] = None
        self.model = settings.OPENAI_MODEL
        self.temperature = settings.OPENAI_TEMPERATURE
        self.max_tokens = settings.OPENAI_MAX_TOKENS

    def init(self) -> None:
        """Create the AsyncOpenAI client.

        If no API key is configured this is a no-op: a warning is logged
        and self.client stays None (LLM calls will raise RuntimeError).

        Raises:
            Exception: any error from AsyncOpenAI construction is logged
                and re-raised.
        """
        if not settings.OPENAI_API_KEY:
            logger.warning("OpenAI API Key未配置，LLM功能将不可用")
            return

        try:
            self.client = AsyncOpenAI(
                api_key=settings.OPENAI_API_KEY,
                base_url=settings.OPENAI_API_BASE,
                timeout=settings.OPENAI_TIMEOUT,
            )
            logger.info(f"LLM客户端初始化成功 - 模型: {self.model}")
        except Exception as e:
            logger.error(f"LLM客户端初始化失败: {e}")
            raise

    @retry(
        stop=stop_after_attempt(3),  # at most 3 attempts
        wait=wait_exponential(multiplier=1, min=2, max=10),  # backoff: 2s, 4s, 8s
        retry=retry_if_exception_type((OpenAIError, TimeoutError, ConnectionError)),
        # NOTE(review): before_sleep_log targets the stdlib logging API.
        # loguru's logger.log() does accept the "WARNING" level name, but
        # the %-style retry message will not be interpolated by loguru —
        # confirm the degraded message is acceptable.
        before_sleep=before_sleep_log(logger, "WARNING"),
        reraise=True
    )
    async def chat(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> Dict[str, Any]:
        """Non-streaming chat completion.

        Args:
            messages: message list, e.g. [{"role": "user", "content": "..."}]
            temperature: sampling temperature (0-2); falls back to the
                configured default only when None
            max_tokens: completion token cap; falls back to the configured
                default only when None
            **kwargs: forwarded verbatim to the OpenAI API

        Returns:
            Dict with content, finish_reason, tokens, cost, duration, model,
            plus tool_calls when the model requested tool calls.

        Raises:
            RuntimeError: if init() has not created a client.
            OpenAIError: propagated after retries are exhausted.
        """
        if not self.client:
            raise RuntimeError("LLM客户端未初始化，请先配置OPENAI_API_KEY")

        start_time = time.time()

        try:
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                # `is not None` so an explicit 0/0.0 override is honored;
                # the previous `x or default` silently discarded falsy values.
                temperature=temperature if temperature is not None else self.temperature,
                max_tokens=max_tokens if max_tokens is not None else self.max_tokens,
                **kwargs
            )

            duration = time.time() - start_time

            # Extract the first choice
            choice = response.choices[0]
            message = choice.message
            content = message.content
            finish_reason = choice.finish_reason

            # Collect any tool calls requested by the model
            tool_calls = []
            if hasattr(message, 'tool_calls') and message.tool_calls:
                for tool_call in message.tool_calls:
                    tool_calls.append({
                        "id": tool_call.id,
                        "type": tool_call.type,
                        "function": {
                            "name": tool_call.function.name,
                            "arguments": tool_call.function.arguments
                        }
                    })
                logger.info(f"LLM请求调用工具: {[tc['function']['name'] for tc in tool_calls]}")

            # Token accounting
            usage = response.usage
            prompt_tokens = usage.prompt_tokens
            completion_tokens = usage.completion_tokens
            total_tokens = usage.total_tokens

            # Estimated cost in USD (see class-level pricing constants)
            cost = (prompt_tokens * self._PROMPT_PRICE_PER_1K
                    + completion_tokens * self._COMPLETION_PRICE_PER_1K) / 1000

            logger.info(
                f"LLM响应成功 | 耗时: {duration:.2f}s | "
                f"Tokens: {total_tokens} | 成本: ${cost:.4f}"
            )

            result = {
                "content": content,
                "finish_reason": finish_reason,
                "tokens": {
                    "prompt": prompt_tokens,
                    "completion": completion_tokens,
                    "total": total_tokens,
                },
                "cost": cost,
                "duration": duration,
                "model": self.model,
            }

            # Attach tool_calls only when present, so existing callers that
            # don't expect the key see an unchanged payload.
            if tool_calls:
                result["tool_calls"] = tool_calls

            return result

        except OpenAIError as e:
            logger.error(f"OpenAI API错误: {e}")
            raise
        except Exception as e:
            logger.error(f"LLM调用失败: {e}")
            raise

    @retry(
        stop=stop_after_attempt(2),  # fewer attempts for streaming
        wait=wait_exponential(multiplier=1, min=1, max=5),
        retry=retry_if_exception_type((OpenAIError, TimeoutError, ConnectionError)),
        before_sleep=before_sleep_log(logger, "WARNING"),
        reraise=True
    )
    async def _open_stream(
        self,
        messages: List[Dict[str, str]],
        temperature: float,
        max_tokens: int,
        **kwargs
    ):
        """Open a streaming completion; only this setup step is retried.

        Decorating the async generator chat_stream() directly was unsafe:
        tenacity's @retry does not reliably handle async generator
        functions, and even if it did, a mid-stream retry would re-yield
        chunks the consumer already received.
        """
        return await self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=True,
            **kwargs
        )

    async def chat_stream(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> AsyncIterator[str]:
        """Streaming chat completion.

        Args:
            messages: message list
            temperature: sampling temperature; default used only when None
            max_tokens: completion token cap; default used only when None
            **kwargs: forwarded verbatim to the OpenAI API

        Yields:
            Content fragments as they arrive from the API.

        Raises:
            RuntimeError: if init() has not created a client.
        """
        if not self.client:
            raise RuntimeError("LLM客户端未初始化，请先配置OPENAI_API_KEY")

        try:
            # Stream creation (the transient-failure-prone part) is retried
            # inside _open_stream; iteration itself is not retried.
            stream = await self._open_stream(
                messages,
                temperature if temperature is not None else self.temperature,
                max_tokens if max_tokens is not None else self.max_tokens,
                **kwargs
            )

            async for chunk in stream:
                if chunk.choices:
                    delta = chunk.choices[0].delta
                    if delta.content:
                        yield delta.content

        except OpenAIError as e:
            logger.error(f"OpenAI流式API错误: {e}")
            raise
        except Exception as e:
            logger.error(f"LLM流式调用失败: {e}")
            raise

    async def chat_with_functions(
        self,
        messages: List[Dict[str, str]],
        functions: List[Dict[str, Any]],
        function_call: str = "auto",
        **kwargs
    ) -> Dict[str, Any]:
        """Chat completion using the legacy `functions` API.

        Args:
            messages: message list
            functions: callable function schemas
            function_call: call strategy ("auto", "none", {"name": "function_name"})
            **kwargs: forwarded verbatim to the OpenAI API

        Returns:
            Dict with content and finish_reason, plus function_call when
            the model chose to call a function.

        Raises:
            RuntimeError: if init() has not created a client.
        """
        if not self.client:
            raise RuntimeError("LLM客户端未初始化")

        try:
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                functions=functions,
                function_call=function_call,
                **kwargs
            )

            choice = response.choices[0]
            message = choice.message

            result = {
                "content": message.content,
                "finish_reason": choice.finish_reason,
            }

            # Attach the function call only when the model produced one
            if hasattr(message, 'function_call') and message.function_call:
                result["function_call"] = {
                    "name": message.function_call.name,
                    "arguments": message.function_call.arguments,
                }

            return result

        except Exception as e:
            logger.error(f"函数调用失败: {e}")
            raise

    def count_tokens(self, text: str) -> int:
        """Rough token-count estimate for *text*.

        Heuristic: ~1.5 chars/token for CJK ideographs (U+4E00–U+9FFF),
        ~4 chars/token for everything else.

        Returns:
            Estimated token count (int, floor of the estimate).
        """
        chinese_chars = sum(1 for c in text if '\u4e00' <= c <= '\u9fff')
        other_chars = len(text) - chinese_chars
        return int(chinese_chars / 1.5 + other_chars / 4)

    def truncate_messages(
        self,
        messages: List[Dict[str, str]],
        max_tokens: int = 3000
    ) -> List[Dict[str, str]]:
        """Trim message history to an estimated token budget.

        System messages are always kept; other messages are kept
        newest-first until the next one would exceed the budget.

        Args:
            messages: message list
            max_tokens: estimated-token budget for non-system messages

        Returns:
            Truncated list: system messages first, then the kept messages
            in their original order.
        """
        # System messages are never dropped
        system_messages = [m for m in messages if m.get("role") == "system"]
        other_messages = [m for m in messages if m.get("role") != "system"]

        kept_messages = []
        total_tokens = 0

        # Walk from the newest message backwards, keeping what fits
        for message in reversed(other_messages):
            tokens = self.count_tokens(message.get("content", ""))
            if total_tokens + tokens > max_tokens:
                break
            kept_messages.insert(0, message)
            total_tokens += tokens

        result = system_messages + kept_messages

        logger.info(f"消息截断: {len(messages)} -> {len(result)} 条 | 预估tokens: {total_tokens}")

        return result


# Global shared LLM client instance (the API client is created later by init_llm())
llm_client = LLMClient()


# Initialization helper
def init_llm():
    """Initialize the global LLM client instance (delegates to LLMClient.init)."""
    llm_client.init()


# Convenience functions
async def chat(
    messages: List[Dict[str, str]],
    **kwargs
) -> Dict[str, Any]:
    """Module-level shortcut that forwards to the global llm_client.chat()."""
    response = await llm_client.chat(messages, **kwargs)
    return response


async def chat_stream(
    messages: List[Dict[str, str]],
    **kwargs
) -> AsyncIterator[str]:
    """Module-level shortcut that re-yields from the global llm_client.chat_stream()."""
    async for piece in llm_client.chat_stream(messages, **kwargs):
        yield piece

