"""
大语言模型API客户端
专门负责与AI API的通信，提供统一的LLM调用接口
支持缓存和重试机制，提高调用效率和稳定性
"""

import json
import logging
from typing import List, Dict, Any, Optional, AsyncGenerator
from pydantic import BaseModel

from ..core.http_client import get_http_client
from ..core.cache import cache_response
from ..core.config import settings
from .retry_service import async_retry

logger = logging.getLogger(__name__)


class ChatMessage(BaseModel):
    """A single chat message exchanged with the LLM."""

    role: str  # one of "user", "assistant", "system"
    content: str  # message text


class LLMClient:
    """
    Large-language-model API client.

    Provides a unified chat interface against an OpenAI-compatible
    endpoint: a blocking call path (with caching and retry) and a
    streaming (SSE) call path, plus helpers to extract text from
    responses and to validate the configured API key.
    """

    def __init__(self):
        # DashScope API configuration (Alibaba Cloud Tongyi Qianwen).
        self.api_key = settings.dashscope_api_key
        # NOTE(review): the base URL is looked up under the "deepseek" key
        # while the model targets DashScope's OpenAI-compatible mode —
        # confirm this config mapping is intentional.
        self.api_base = settings.get_api_base_url("deepseek")
        self.model = "qwen-plus"  # Tongyi Qianwen model

    def _build_request_data(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str],
        stream: bool,
        **kwargs,
    ) -> Dict[str, Any]:
        """Build the OpenAI-style request body shared by both call paths."""
        formatted_messages: List[Dict[str, str]] = []
        # The system prompt, when given, is prepended before the conversation.
        if system_prompt:
            formatted_messages.append({"role": "system", "content": system_prompt})
        formatted_messages.extend(messages)
        return {
            "model": self.model,
            "messages": formatted_messages,
            "temperature": kwargs.get("temperature", settings.temperature),
            "max_tokens": kwargs.get("max_tokens", settings.max_tokens),
            "stream": stream,
        }

    def _headers(self) -> Dict[str, str]:
        """Bearer-token and content-type headers used by every API request."""
        return {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

    @cache_response("llm_chat", ttl=60)  # cache 1 minute to cut duplicate calls
    @async_retry(retries=3, delay=1.0, backoff=2.0)
    async def chat_completion(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str] = None,
        stream: bool = False,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Send a (blocking) chat-completion request.

        Args:
            messages: Conversation messages ({"role", "content"} dicts).
            system_prompt: Optional system prompt prepended to the messages.
            stream: Forwarded to the API, but the response is still parsed
                here as a single JSON body — use chat_stream() for true
                streaming consumption.
            **kwargs: Optional overrides (temperature, max_tokens).

        Returns:
            Dict: Parsed JSON response from the API.

        Raises:
            ValueError: If the API returns a non-200 status.
        """
        request_data = self._build_request_data(
            messages, system_prompt, stream, **kwargs
        )

        # Log the exact payload submitted to the API.
        logger.info(
            f"[通义千问] chat_completion请求参数: {json.dumps(request_data, ensure_ascii=False)}"
        )

        client = get_http_client()
        response = await client.post(
            f"{self.api_base}/chat/completions",
            headers=self._headers(),
            json=request_data,
        )

        if response.status_code != 200:
            error_msg = f"LLM API调用失败: {response.status_code} - {response.text}"
            logger.error(error_msg)
            raise ValueError(error_msg)

        return response.json()

    async def chat_stream(
        self,
        messages: List[Dict[str, str]],
        system_prompt: Optional[str] = None,
        **kwargs,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Stream a chat completion as decoded SSE chunks.

        Args:
            messages: Conversation messages.
            system_prompt: Optional system prompt prepended to the messages.
            **kwargs: Optional overrides (temperature, max_tokens).

        Yields:
            Dict: One decoded JSON chunk per SSE "data:" line.

        Raises:
            ValueError: If the API returns a non-200 status.
        """
        request_data = self._build_request_data(
            messages, system_prompt, True, **kwargs
        )

        client = get_http_client()
        async with client.stream(
            "POST",
            f"{self.api_base}/chat/completions",
            headers=self._headers(),
            json=request_data,
        ) as response:
            if response.status_code != 200:
                error_msg = f"LLM流式API调用失败: {response.status_code}"
                logger.error(error_msg)
                raise ValueError(error_msg)

            # SSE framing: payload lines are prefixed "data: "; the
            # sentinel "[DONE]" terminates the stream.
            async for chunk in response.aiter_lines():
                if not chunk.startswith("data: "):
                    continue
                data = chunk[6:]  # strip the "data: " prefix

                if data.strip() == "[DONE]":
                    break

                try:
                    chunk_data = json.loads(data)
                except json.JSONDecodeError:
                    # Skip undecodable fragments rather than aborting the stream.
                    logger.warning(f"无法解析流式数据: {data}")
                    continue

                # Log each streamed fragment for traceability.
                logger.info(
                    f"[通义千问] chat_stream返回片段: {json.dumps(chunk_data, ensure_ascii=False)}"
                )
                yield chunk_data

    def extract_content_from_response(self, response_data: Dict[str, Any]) -> str:
        """
        Extract the text content from an API response.

        Handles full responses ("message"), streaming deltas ("delta"),
        and the legacy completion format ("text").

        Args:
            response_data: Parsed API response.

        Returns:
            str: The extracted text, or "" when nothing can be extracted.
        """
        try:
            choices = response_data.get("choices") or []
            if choices:
                choice = choices[0]
                # Full (non-streaming) response.
                if "message" in choice:
                    return choice["message"].get("content", "")
                # Streaming delta chunk.
                if "delta" in choice and "content" in choice["delta"]:
                    return choice["delta"]["content"]
                # Legacy completion format.
                if "text" in choice:
                    return choice["text"]

            logger.warning("无法从响应中提取内容")
            return ""

        except Exception as e:
            # Defensive boundary: malformed response shapes must not crash
            # callers — log and return an empty string instead.
            logger.error(f"提取响应内容失败: {e}")
            return ""

    async def validate_api_key(self) -> bool:
        """
        Check whether the configured API key works.

        Sends a minimal test request and treats any response containing
        "choices" as success.

        Returns:
            bool: True if the key is valid, False otherwise.
        """
        try:
            test_messages = [{"role": "user", "content": "Hello"}]
            response = await self.chat_completion(test_messages)
            return "choices" in response
        except Exception as e:
            logger.error(f"API密钥验证失败: {e}")
            return False


# Module-level singleton LLM client shared across the application.
llm_client = LLMClient()
