# LLM客户端封装
import os
import logging
from typing import Generator, Dict, List, Optional
from openai import OpenAI
from openai import APIError, APIConnectionError, RateLimitError, AuthenticationError
from django.conf import settings

# 配置日志
logger = logging.getLogger(__name__)


class LLMClient:
    """Large-language-model client wrapping an OpenAI-compatible
    chat-completions endpoint (DashScope).

    Maintains a running conversation history (system prompt plus
    user/assistant turns) and records the token usage of the most
    recent streamed completion.
    """

    def __init__(self, model: Optional[str] = None, system_prompt: Optional[str] = None):
        """
        Initialize the LLM client.

        Args:
            model: Model name; defaults to ``settings.LLM_MODEL``.
            system_prompt: System prompt; defaults to ``settings.SYSTEM_PROMPT``.

        Raises:
            ValueError: If the ``DASHSCOPE_API_KEY`` environment variable is unset.
        """
        # API key comes from the environment only — never hard-coded.
        self.api_key = os.getenv("DASHSCOPE_API_KEY")
        if not self.api_key:
            raise ValueError("未找到DASHSCOPE_API_KEY环境变量，请确保已正确设置")

        # OpenAI SDK client pointed at DashScope's compatible-mode endpoint.
        self.client = OpenAI(
            api_key=self.api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )

        # Model configuration, falling back to Django settings.
        self.model = model or settings.LLM_MODEL
        self.system_prompt = system_prompt or settings.SYSTEM_PROMPT

        # Conversation state: history always begins with the system prompt.
        self.messages: List[Dict[str, str]] = [
            {"role": "system", "content": self.system_prompt}
        ]
        self.usage = None  # Token usage of the most recent completion (None until first call).

    def add_message(self, role: str, content: str) -> None:
        """
        Append a message to the conversation history.

        Args:
            role: Message role; must be "user" or "assistant".
            content: Message text.

        Raises:
            ValueError: If ``role`` is neither "user" nor "assistant".
        """
        if role not in ("user", "assistant"):
            raise ValueError("角色必须是'user'或'assistant'")

        self.messages.append({"role": role, "content": content})
        # Lazy %-style args: the message is only interpolated if DEBUG is enabled.
        logger.debug("添加消息: %s - %s...", role, content[:50])

    def stream_chat(self, user_input: Optional[str] = None, **kwargs) -> Generator[str, None, None]:
        """
        Streaming chat interface.

        Args:
            user_input: Optional user input appended to the history before the call.
            **kwargs: Extra parameters forwarded to the completions API
                (e.g. ``temperature``, ``extra_body``).

        Yields:
            Content fragments of the streamed model response.

        Raises:
            Exception: On authentication, connection, rate-limit or API errors;
                the original SDK exception is attached as ``__cause__``.
        """
        try:
            # Record the user turn before calling the model.
            if user_input:
                self.add_message("user", user_input)

            logger.info("调用模型: %s, 消息数量: %s", self.model, len(self.messages))

            # Call the API with streaming enabled.
            # BUG FIX: **kwargs was accepted but never forwarded to the API
            # (it had been left inside a commented-out line), silently dropping
            # caller-supplied parameters.
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=self.messages,
                stream=True,
                stream_options={"include_usage": True},
                # When using the open-source Qwen3 models, also pass:
                # extra_body={"enable_thinking": False}
                **kwargs,
            )

            full_response = []
            # Consume the stream, yielding each content delta as it arrives.
            for chunk in completion:
                if chunk.choices and chunk.choices[0].delta.content:
                    content = chunk.choices[0].delta.content
                    full_response.append(content)
                    yield content

                # With include_usage=True the final chunk carries usage stats.
                if chunk.usage:
                    self.usage = chunk.usage

            # Record the assistant turn so follow-up calls see the full context.
            if full_response:
                self.add_message("assistant", ''.join(full_response))

        # Chain each SDK error as __cause__ so the original traceback survives.
        except AuthenticationError as e:
            logger.error("认证失败: API密钥可能无效或已过期")
            raise Exception("认证失败，请检查API密钥是否正确") from e
        except APIConnectionError as e:
            logger.error("连接错误: 无法连接到API服务器")
            raise Exception("网络连接失败，请检查网络设置") from e
        except RateLimitError as e:
            logger.error("速率限制: 已超过API调用限额")
            raise Exception("API调用过于频繁，请稍后再试") from e
        except APIError as e:
            logger.error("API错误: %s", e)
            raise Exception(f"API调用失败: {str(e)}") from e
        except Exception as e:
            logger.error("发生未知错误: %s", e)
            raise

    def get_usage(self) -> Optional[Dict]:
        """Return the token usage of the most recent call as a dict, or None."""
        if self.usage:
            return self.usage.model_dump()
        return None

    def clear_history(self) -> None:
        """Reset the conversation history, keeping only the system prompt."""
        self.messages = [
            {"role": "system", "content": self.system_prompt}
        ]
        self.usage = None
        logger.info("对话历史已清空")

    def get_history(self) -> List[Dict[str, str]]:
        """Return a shallow copy of the current conversation history."""
        return self.messages.copy()
