import os
import json
from openai import OpenAI, APIError, AuthenticationError, APIConnectionError, Timeout


class QwenChatBot:
    """
    Conversation helper for Qwen models served through Alibaba Cloud's
    OpenAI-compatible DashScope endpoint.

    Features:
    - single-turn chat (no context kept between calls)
    - multi-turn chat (conversation history maintained automatically)
    - streaming output (answer delivered chunk by chunk as JSON events)
    - explicit handling of common API failures
    - configurable model parameters
    """

    def __init__(self,
                 api_key=None,
                 base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
                 model="qwen-plus",
                 max_tokens=None,
                 system_prompt="You are a helpful assistant."):
        """
        Initialise the chat bot.

        :param api_key: DashScope API key; falls back to the
                        DASHSCOPE_API_KEY environment variable when None.
        :param base_url: base URL of the OpenAI-compatible API.
        :param model: model name to use.
        :param max_tokens: optional cap on generated tokens per answer.
        :param system_prompt: system message defining the assistant's
                              behaviour.
        :raises ValueError: if no API key can be resolved.
        :raises RuntimeError: if the underlying client cannot be created.
        """
        self.api_key = api_key or os.getenv("DASHSCOPE_API_KEY")
        self.base_url = base_url
        self.model = model
        self.max_tokens = max_tokens
        # Bug fix: system_prompt was documented and used by
        # reset_conversation()/stream_chat() but never accepted nor assigned,
        # which raised AttributeError and made the documented kwarg a TypeError.
        self.system_prompt = system_prompt
        self.conversation_history = []  # message history for multi-turn chat
        self._client = None

        # Fail fast when no credentials are available.  The key is
        # deliberately NOT printed or logged — it is a secret.
        if not self.api_key:
            raise ValueError("API密钥未提供，请传入api_key参数或设置DASHSCOPE_API_KEY环境变量")

        # Build the client and seed the history with the system prompt.
        self._init_client()
        self.reset_conversation()

    def _init_client(self):
        """Create the OpenAI-SDK client pointed at the DashScope endpoint."""
        try:
            self._client = OpenAI(
                api_key=self.api_key,
                base_url=self.base_url
            )
        except Exception as e:
            raise RuntimeError(f"初始化客户端失败: {str(e)}")

    def reset_conversation(self):
        """Reset the conversation history, keeping only the system prompt."""
        self.conversation_history = [{"role": "system", "content": self.system_prompt}]

    def add_message(self, role, content):
        """
        Manually append a message to the conversation history.

        :param role: "user" for the user, "assistant" for the assistant.
        :param content: message text; must be non-empty.
        :raises ValueError: on an unknown role or empty content.
        """
        if role not in ["user", "assistant"]:
            raise ValueError("角色必须是'user'或'assistant'")

        if not content or not str(content).strip():
            raise ValueError("消息内容不能为空")

        self.conversation_history.append({"role": role, "content": content})

    def get_conversation_history(self):
        """Return a shallow copy of the history so callers cannot mutate it."""
        return [msg.copy() for msg in self.conversation_history]

    def _rollback_pending_question(self, question):
        """
        Remove the trailing user message if it matches *question*.

        Used to undo add_message() when a request fails, so a failed turn
        leaves the conversation history unchanged.
        """
        if (self.conversation_history and
                self.conversation_history[-1]["role"] == "user" and
                self.conversation_history[-1]["content"] == question):
            self.conversation_history.pop()

    @staticmethod
    def _build_result(completion):
        """
        Validate a non-streaming completion and convert it to a result dict.

        :return: {"answer": str, "usage": {prompt/completion/total tokens}}
        :raises RuntimeError: when the model returns no choices or no content.
        """
        if not completion.choices:
            raise RuntimeError("模型返回结果为空")

        answer = completion.choices[0].message.content
        if not answer:
            raise RuntimeError("模型未返回有效内容")

        return {
            "answer": answer,
            "usage": {
                "prompt_tokens": completion.usage.prompt_tokens,
                "completion_tokens": completion.usage.completion_tokens,
                "total_tokens": completion.usage.total_tokens
            }
        }

    def single_turn_chat(self, question, temperature=0.9):
        """
        Single-turn chat; nothing is written to the conversation history.

        :param question: user question; must be non-empty.
        :param temperature: sampling temperature (0-2, higher = more varied).
        :return: dict with "answer" and "usage" keys.
        :raises ValueError: if the question is empty.
        :raises RuntimeError: on any API or response failure.
        """
        if not question or not str(question).strip():
            raise ValueError("问题不能为空")

        try:
            # Bug fix: the system prompt was documented as part of the request
            # but never sent; include it so single-turn behaviour matches the
            # streaming single-turn path.
            messages = [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": question}
            ]

            params = {
                "model": self.model,
                "messages": messages,
                "temperature": temperature
            }

            # Optional parameters.
            if self.max_tokens is not None:
                params["max_tokens"] = self.max_tokens

            completion = self._client.chat.completions.create(**params)
            return self._build_result(completion)

        except AuthenticationError:
            raise RuntimeError("API密钥无效或认证失败")
        except APIConnectionError:
            raise RuntimeError("无法连接到API服务，请检查网络")
        # NOTE(review): openai>=1.0 exports APITimeoutError rather than
        # Timeout — confirm the installed openai version provides this name.
        except Timeout:
            raise RuntimeError("API请求超时，请稍后重试")
        except APIError as e:
            raise RuntimeError(f"API调用错误: {str(e)}")
        except Exception as e:
            raise RuntimeError(f"处理请求时发生错误: {str(e)}")

    def multi_turn_chat(self, question, temperature=0.9):
        """
        Multi-turn chat; the conversation history is maintained automatically.

        :param question: user question; must be non-empty.
        :param temperature: sampling temperature (0-2, higher = more varied).
        :return: dict with "answer" and "usage" keys.
        :raises ValueError: if the question is empty.
        """
        if not question or not str(question).strip():
            raise ValueError("问题不能为空")

        try:
            # Record the user question before calling the API.
            self.add_message("user", question)

            params = {
                "model": self.model,
                "messages": self.conversation_history,
                "temperature": temperature
            }

            if self.max_tokens is not None:
                params["max_tokens"] = self.max_tokens

            completion = self._client.chat.completions.create(**params)
            result = self._build_result(completion)

            # Persist the assistant's reply so the next turn sees it.
            self.add_message("assistant", result["answer"])
            return result

        except Exception:
            # Undo the just-added user message so a failed turn leaves the
            # history unchanged; bare raise preserves the original traceback
            # (the original `raise e` re-anchored it here).
            self._rollback_pending_question(question)
            raise

    def stream_chat(self, question, is_multi_turn=False, temperature=0.9):
        """
        Streaming chat; yields the answer piece by piece.

        Each yielded item is a json.dumps'd dict with a "type" key:
        - "content": a fragment of the answer
        - "finish":  model finished generating (with "reason")
        - "usage":   token accounting (with "data")
        - "done":    end of stream
        - "error":   failure (with "message", optionally "code")

        :param question: user question; must be non-empty.
        :param is_multi_turn: keep and extend the conversation history.
        :param temperature: sampling temperature (0-2, higher = more varied).
        :return: generator of JSON strings as described above.
        """
        if not question or not str(question).strip():
            yield json.dumps({
                "type": "error",
                "message": "问题不能为空"
            })
            return

        try:
            # Prepare the message list.
            if is_multi_turn:
                # Multi-turn mode: append the question to the shared history.
                self.add_message("user", question)
                messages = self.conversation_history
            else:
                # Single-turn mode: build a throwaway message list.
                messages = [
                    {"role": "system", "content": self.system_prompt},
                    {"role": "user", "content": question}
                ]

            params = {
                "model": self.model,
                "messages": messages,
                "stream": True,
                "temperature": temperature,
                # Ask the API to append a final chunk carrying token usage.
                "stream_options": {"include_usage": True}
            }

            if self.max_tokens is not None:
                params["max_tokens"] = self.max_tokens

            completion = self._client.chat.completions.create(**params)

            full_answer = ""  # accumulated answer, saved to history in multi-turn mode

            for chunk in completion:
                if not chunk:
                    continue

                if chunk.choices:
                    choice = chunk.choices[0]

                    # Content fragment.
                    delta = getattr(choice, 'delta', None)
                    if delta and getattr(delta, 'content', None) is not None:
                        full_answer += delta.content
                        yield json.dumps({
                            "type": "content",
                            "content": delta.content
                        })

                    # Finish marker.
                    if getattr(choice, 'finish_reason', None):
                        # In multi-turn mode, persist the complete answer.
                        if is_multi_turn:
                            self.add_message("assistant", full_answer)

                        yield json.dumps({
                            "type": "finish",
                            "reason": choice.finish_reason
                        })

                # Usage statistics (arrives last when include_usage is set).
                if getattr(chunk, 'usage', None):
                    usage = chunk.usage
                    yield json.dumps({
                        "type": "usage",
                        "data": {
                            "prompt_tokens": usage.prompt_tokens,
                            "completion_tokens": usage.completion_tokens,
                            "total_tokens": usage.total_tokens
                        }
                    })

            # End-of-stream marker.
            yield json.dumps({"type": "done"})

        # Bug fix: the rollback previously ran only in the generic Exception
        # branch, so auth/connection/timeout/API failures left a dangling
        # user message in the multi-turn history.  Roll back in every branch.
        except AuthenticationError:
            if is_multi_turn:
                self._rollback_pending_question(question)
            yield json.dumps({
                "type": "error",
                "message": "API密钥无效或认证失败"
            })
        except APIConnectionError:
            if is_multi_turn:
                self._rollback_pending_question(question)
            yield json.dumps({
                "type": "error",
                "message": "无法连接到API服务，请检查网络"
            })
        except Timeout:
            if is_multi_turn:
                self._rollback_pending_question(question)
            yield json.dumps({
                "type": "error",
                "message": "API请求超时，请稍后重试"
            })
        except APIError as e:
            if is_multi_turn:
                self._rollback_pending_question(question)
            yield json.dumps({
                "type": "error",
                "message": f"API调用错误: {str(e)}",
                "code": getattr(e, 'code', 'unknown')
            })
        except Exception as e:
            if is_multi_turn:
                self._rollback_pending_question(question)
            yield json.dumps({
                "type": "error",
                "message": f"处理请求时发生错误: {str(e)}"
            })


# Usage example (requires DASHSCOPE_API_KEY or an explicit api_key)
if __name__ == "__main__":

    def _show_stream(stream):
        """Consume a stream_chat() generator, echoing only content chunks."""
        for raw_chunk in stream:
            event = json.loads(raw_chunk)
            if event["type"] == "content":
                print(event["content"], end="", flush=True)
        print("\n")

    try:
        # Build the chat bot.
        bot = QwenChatBot(
            # api_key="your_api_key_here",  # optional if the env var is set
            model="qwen-plus",
            system_prompt="你是一个乐于助人的助手，用中文简洁明了地回答问题。"
        )

        print("===== 单轮对话示例 =====")
        result = bot.single_turn_chat("什么是人工智能？")
        print(f"回答: {result['answer']}")
        print(f"Token使用量: {result['usage']}\n")

        print("===== 多轮对话示例 =====")
        # First turn.
        result = bot.multi_turn_chat("推荐一本机器学习的入门书籍")
        print(f"问题1: 推荐一本机器学习的入门书籍")
        print(f"回答1: {result['answer']}\n")

        # Second turn (depends on the previous context).
        result = bot.multi_turn_chat("这本书的主要内容是什么？")
        print(f"问题2: 这本书的主要内容是什么？")
        print(f"回答2: {result['answer']}\n")

        print("===== 流式对话示例 =====")
        print("问题: 请简要介绍一下Python编程语言")
        print("回答: ", end="", flush=True)
        _show_stream(bot.stream_chat("请简要介绍一下Python编程语言"))

        print("===== 多轮流式对话示例 =====")
        bot.reset_conversation()  # start from a clean history
        print("问题1: 什么是函数式编程？")
        print("回答1: ", end="", flush=True)
        _show_stream(bot.stream_chat("什么是函数式编程？", is_multi_turn=True))

        print("问题2: Python如何支持函数式编程？")
        print("回答2: ", end="", flush=True)
        _show_stream(bot.stream_chat("Python如何支持函数式编程？", is_multi_turn=True))

    except Exception as e:
        print(f"发生错误: {str(e)}")
