import os
import json
from openai import OpenAI
from typing import Generator, List, Dict, Optional, Union
from pydantic import BaseModel


# Message structure model (used for type hints and validation).
class Message(BaseModel):
    """A single chat message as exchanged with the OpenAI-compatible API."""

    role: str  # one of: "system", "user", "assistant"
    content: str  # message text; expected to be a non-empty string


class ThreadSafeChatBot:
    """Thread-safe, stateless chat client for OpenAI-compatible endpoints.

    The instance holds only immutable configuration plus the SDK client;
    callers pass the complete conversation history on every call, so one
    instance can safely be shared across threads.
    """

    def __init__(self,
                 base_url: str = "https://dashscope.aliyuncs.com/compatible-mode/v1",
                 default_model: str = "qwen-plus",
                 api_key: Optional[str] = None):
        """Initialize the client.

        Args:
            base_url: OpenAI-compatible endpoint (DashScope by default).
            default_model: Model used when ``chat()`` is not given one.
            api_key: Explicit API key; when omitted, falls back to the
                DASHSCOPE_API_KEY environment variable. (The original error
                message already promised a parameter; now it exists.)

        Raises:
            ValueError: If no API key can be resolved.
        """
        self.api_key = api_key or os.getenv("DASHSCOPE_API_KEY")
        if not self.api_key:
            raise ValueError("API密钥未配置，请通过参数或环境变量DASHSCOPE_API_KEY设置")

        self.base_url = base_url
        self.default_model = default_model

        # Security: never print or log the key -- it is a credential.
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )

    def _validate_messages(self, messages: List[Dict[str, str]]) -> None:
        """Validate the structure of a conversation history.

        Each message must be a dict with a ``role`` in
        {"system", "user", "assistant"} and a non-empty string ``content``.

        Raises:
            ValueError: On the first malformed message found.
        """
        if not messages:
            raise ValueError("对话历史不能为空，请至少包含系统提示或用户消息")

        valid_roles = {"system", "user", "assistant"}
        for i, msg in enumerate(messages):
            if "role" not in msg or "content" not in msg:
                raise ValueError(f"消息格式错误，第{i + 1}条消息缺少role或content字段")
            if msg["role"] not in valid_roles:
                raise ValueError(f"消息角色错误，第{i + 1}条消息的role必须是{valid_roles}之一")
            if not isinstance(msg["content"], str) or len(msg["content"].strip()) == 0:
                raise ValueError(f"消息内容错误，第{i + 1}条消息的content不能为空字符串")

    def chat(
            self,
            messages: List[Dict[str, str]],
            model: Optional[str] = None,
            temperature: float = 0.9,
            stream: bool = False
    ) -> Union[str, Generator[str, None, None]]:
        """General chat entry point (single/multi turn, streaming or not).

        Args:
            messages: Full conversation history ({"role", "content"} dicts).
            model: Model override; defaults to ``self.default_model``.
            temperature: Sampling temperature forwarded to the API.
            stream: When True, return a generator of JSON-encoded events
                ("content" / "finish" / "error"); otherwise return the
                assistant reply as a plain string.

        Raises:
            ValueError: If ``messages`` is malformed.
            Exception: API errors are re-raised in non-streaming mode; in
                streaming mode they are yielded as an "error" event instead.
        """
        used_model = model or self.default_model
        self._validate_messages(messages)

        api_params = {
            "model": used_model,
            "messages": messages,
            "temperature": temperature,
            "stream": stream
        }
        # stream_options is only valid on streaming requests.
        if stream:
            api_params["stream_options"] = {"include_usage": True}

        def _error_event(exc: Exception) -> str:
            # Uniform payload shape for errors surfaced on a streaming channel.
            return json.dumps({
                "type": "error",
                "data": str(exc),
                "complete": True
            })

        try:
            completion = self.client.chat.completions.create(**api_params)
        except Exception as e:
            if stream:
                # Streaming callers consume a generator, so deliver the
                # failure as an in-band "error" event instead of raising.
                def error_generator() -> Generator[str, None, None]:
                    yield _error_event(e)

                return error_generator()
            raise  # preserve the original traceback for non-streaming callers

        # Non-streaming: the reply is available immediately.
        if not stream:
            return completion.choices[0].message.content

        def stream_generator() -> Generator[str, None, None]:
            full_content = ""
            try:
                for chunk in completion:
                    # The trailing usage chunk (include_usage) has no choices.
                    if not chunk.choices:
                        continue

                    choice = chunk.choices[0]
                    # Content fragment.
                    if hasattr(choice.delta, "content") and choice.delta.content:
                        full_content += choice.delta.content
                        yield json.dumps({
                            "type": "content",
                            "data": choice.delta.content,
                            "complete": False
                        })
                    # Finish marker. Checked independently (not elif) so a
                    # chunk carrying both its last content fragment and a
                    # finish_reason still emits the "finish" event.
                    if hasattr(choice, "finish_reason") and choice.finish_reason:
                        yield json.dumps({
                            "type": "finish",
                            "data": {
                                "reason": choice.finish_reason,
                                "full_content": full_content
                            },
                            "complete": True
                        })
            except Exception as exc:
                # Failures while iterating the stream happen after chat()
                # has returned; surface them in-band as well.
                yield _error_event(exc)

        return stream_generator()

    def single_turn_chat(
            self,
            question: str,
            system_prompt: str = "You are a helpful assistant.",
            model: Optional[str] = None,
            temperature: float = 0.9
    ) -> str:
        """Convenience wrapper: one question, non-streaming answer."""
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question}
        ]
        return self.chat(
            messages=messages,
            model=model,
            temperature=temperature,
            stream=False
        )

    def stream_single_turn_chat(
            self,
            question: str,
            system_prompt: str = "You are a helpful assistant.",
            model: Optional[str] = None,
            temperature: float = 0.9
    ) -> Generator[str, None, None]:
        """Convenience wrapper: one question, streamed JSON events."""
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question}
        ]
        return self.chat(
            messages=messages,
            model=model,
            temperature=temperature,
            stream=True
        )
