import os
from typing import List
import requests

from cbec.llm.model.Message import Message
from cbec.llm.model.ChatCompletion import ChatCompletion


"""
Openai 接口兼容格式
"""

class OpenAICompatible:
    """Client for OpenAI-compatible chat-completion HTTP APIs.

    Talks to any endpoint implementing the OpenAI ``/chat/completions``
    contract (OpenAI, DeepSeek, ...), in both non-streaming (``chat``)
    and streaming (``chat_stream``) modes.
    """

    # Fallback base URL when neither the argument nor OPENAI_BASE_URL is set.
    DEFAULT_API_URL = "https://api.openai.com/v1"

    def __init__(
            self,
            api_url: str | None = None,
            api_key: str | None = None,
            model: str = "gpt-4o",
            temperature: float = 0.0,
    ):
        """
        :param api_url: base URL of the API; falls back to the
            OPENAI_BASE_URL environment variable, then DEFAULT_API_URL
        :param api_key: API key; falls back to OPENAI_API_KEY
        :param model: default model name used by ``chat``
        :param temperature: default temperature (kept for backward
            compatibility; ``chat``/``chat_stream`` take their own)
        """
        # Resolve environment variables at construction time, not at
        # class-definition time, so env changes after import are honored.
        self.api_url = api_url or os.getenv("OPENAI_BASE_URL") or self.DEFAULT_API_URL
        self.api_key = api_key if api_key is not None else os.getenv("OPENAI_API_KEY")
        self.model = model
        self.temperature = temperature

    def chat(
            self,
            user_input: str,
            chat_historys: List[Message] | None = None,
            temperature: float = 0.6,
            timeout: float | None = None,
    ) -> str:
        """Send a non-streaming chat request and return the reply text.

        :param user_input: latest user message
        :param chat_historys: prior conversation turns; the new user message
            is appended in place when a list is provided (a fresh list is
            used when omitted)
        :param temperature: sampling temperature, default 0.6
        :param timeout: request timeout in seconds; None disables the timeout
        :return: assistant reply content
        """
        # None default instead of `= []`: a shared mutable default would
        # silently accumulate messages across unrelated calls.
        if chat_historys is None:
            chat_historys = []
        chat_historys.append(Message(role="user", content=user_input))

        completion = self._make_chat_api(
            model=self.model,
            messages=chat_historys,
            temperature=temperature,
            timeout=timeout,  # was accepted but dropped before this fix
        )
        return completion.choices[0].message.get("content")

    def chat_stream(
            self,
            user_input: str,
            chat_historys: List[Message] | None = None,
            temperature: float = 0.6,
            timeout: float | None = None,
    ):
        """Send a streaming chat request, yielding content chunks.

        :param user_input: latest user message
        :param chat_historys: prior conversation turns; the new user message
            is appended in place when a list is provided
        :param temperature: sampling temperature, default 0.6
        :param timeout: request timeout in seconds; None disables the timeout
        :yield: incremental assistant content strings
        :raises requests.HTTPError: on a non-2xx response
        """
        import json  # hoisted out of the per-chunk loop (was re-imported per line)

        if chat_historys is None:
            chat_historys = []
        chat_historys.append(Message(role="user", content=user_input))

        payload = {
            "messages": [
                {"role": msg.role, "content": msg.content}
                for msg in chat_historys
            ],
            "model": self.model,
            "temperature": temperature,
            "stream": True,  # request server-sent-events style chunks
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        response = requests.post(
            self.api_url + "/chat/completions",
            json=payload,
            headers=headers,
            stream=True,
            timeout=timeout,
        )
        # Fail fast on HTTP errors instead of trying to parse an error body.
        response.raise_for_status()

        try:
            for raw_line in response.iter_lines():
                if not raw_line:
                    continue
                line = raw_line.decode("utf-8")
                # Only "data: ..." lines carry payload in the SSE stream.
                if not line.startswith("data: "):
                    continue
                data = line[6:]
                if data == "[DONE]":  # end-of-stream sentinel
                    break
                try:
                    parsed = json.loads(data)
                    chunk = (
                        parsed.get("choices", [{}])[0]
                        .get("delta", {})
                        .get("content", "")
                    )
                except (ValueError, IndexError, AttributeError, TypeError) as e:
                    # Narrowed from bare `except Exception`: only parse/shape
                    # errors are expected here; anything else should surface.
                    print(f"解析流式响应出错: {e}")
                    continue
                if chunk:
                    yield chunk
        finally:
            # Release the pooled connection even when the consumer abandons
            # the generator early (the old code leaked it).
            response.close()

    def _make_chat_api(
            self,
            messages: List[Message],
            model: str = "deepseek-chat",
            temperature: float = 0.6,
            timeout: float | None = None,
    ) -> ChatCompletion:
        """POST a non-streaming chat request and parse the response.

        :param messages: full conversation to send
        :param model: model name
        :param temperature: sampling temperature
        :param timeout: request timeout in seconds; None disables the timeout
        :return: parsed ChatCompletion
        :raises requests.HTTPError: on a non-2xx response
        """
        openai_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in messages
        ]
        params = {
            "messages": openai_messages,
            "model": model,
            "temperature": temperature,
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        resp = requests.post(
            self.api_url + "/chat/completions",
            json=params,
            headers=headers,
            timeout=timeout,
        )
        # Surface HTTP errors before indexing into the body: an error payload
        # has no 'usage'/'choices' keys and would raise a confusing KeyError.
        resp.raise_for_status()
        response_json = resp.json()

        # Local imports mirror the original file's lazy-import style;
        # ChatCompletion is already imported at module level.
        from cbec.llm.model.Choice import Choice
        from cbec.llm.model.Usage import Usage

        # Field name differs deliberately: API 'prompt_tokens' maps to the
        # local model's 'prompt_token'.
        usage = Usage(
            prompt_token=response_json['usage']['prompt_tokens'],
            completion_tokens=response_json['usage']['completion_tokens'],
            total_tokens=response_json['usage']['total_tokens'],
        )

        choices = [
            Choice(
                index=choice_data['index'],
                message={
                    'role': 'assistant',
                    'content': choice_data['message']['content'],
                },
                finish_reason=choice_data['finish_reason'],
            )
            for choice_data in response_json['choices']
        ]

        return ChatCompletion(
            id=response_json['id'],
            created=response_json['created'],
            model=response_json['model'],
            choices=choices,
            usage=usage,
        )
