# 采用 OpenAI 模式请求阿里百炼 API
from openai import OpenAI
from agent.qwen.api.ApiModel import ApiModel

class OpenAiModel(ApiModel):
    """Call the Alibaba Bailian (DashScope) service through its OpenAI-compatible endpoint.

    Inherits configuration (``model``, ``stream``, ``thinking``) and the
    dispatching helper ``print_text_answer`` from ``ApiModel``.
    """

    def __init__(self, api_key='', base_url="", model="", stream=False, thinking=False):
        """Store the configuration on the base class and build the OpenAI client.

        Args:
            api_key:  API key for the OpenAI-compatible endpoint.
            base_url: Base URL of the endpoint.
            model:    Model name to request.
            stream:   Whether ``chat`` uses streaming responses.
            thinking: Whether to request the model's thinking/reasoning output.
        """
        super().__init__(api_key, base_url=base_url, model=model, stream=stream, thinking=thinking)
        self.client = OpenAI(api_key=api_key, base_url=base_url)

    # Print the token usage of one request (prompt / completion / total).
    def print_token_count(self, token_usage):
        if token_usage is not None:
            print(f"\n--- 请求用量, 输入 Tokens: {token_usage.prompt_tokens}， 输出 Tokens: {token_usage.completion_tokens}, 总计 Tokens: {token_usage.total_tokens}")

    # Non-streaming output: print the complete answer and return it.
    def print_text_not_stream(self, resp) -> str:
        answer = resp.choices[0].message.content
        print(f"模型：{answer}")
        return answer

    # Streaming output: print chunks as they arrive; return the assembled answer text.
    def print_text_stream(self, resp) -> str:
        answer = ""
        # True once we have switched from the thinking phase to the answer phase.
        is_answering = False
        if self.thinking:
            print("\n" + "=" * 20 + "思考过程" + "=" * 20 + "\n")
        for chunk in resp:
            if chunk.choices:
                delta = chunk.choices[0].delta
                # Thinking/reasoning tokens (only sent while thinking is enabled).
                if getattr(delta, "reasoning_content", None) is not None:
                    if not is_answering:
                        print(delta.reasoning_content, end="", flush=True)
                # Answer tokens — `delta.content` is guaranteed truthy in this branch.
                elif getattr(delta, "content", None):
                    if self.thinking and not is_answering:
                        # Print a divider between the thinking output and the answer.
                        print("\n" + "=" * 20 + "完整回复" + "=" * 20 + "\n")
                        is_answering = True
                    print(delta.content, end="", flush=True)
                    # Accumulate the full answer for the caller.
                    answer += delta.content
            # The final chunk has no choices but carries the token usage
            # (requested via stream_options include_usage).
            elif chunk.usage:
                self.print_token_count(chunk.usage)
        return answer

    # Send one chat request; print and return the model's answer.
    def chat(self, messages) -> str:
        """Send *messages* to the configured model and return the answer text.

        On any request failure, prints the exception and returns a fixed
        user-facing fallback string instead of raising.
        """
        # Build the request arguments once so the streaming and non-streaming
        # branches cannot drift apart.
        kwargs = {
            "model": self.model,
            "messages": messages,
            # enable_thinking is a DashScope extension, passed via extra_body.
            "extra_body": {"enable_thinking": self.thinking},
        }
        if self.stream:
            kwargs["stream"] = True
            # Ask the server to append a final chunk reporting token usage.
            kwargs["stream_options"] = {"include_usage": True}
        try:
            completion = self.client.chat.completions.create(**kwargs)
        except Exception as e:
            # Broad catch is deliberate: any API/network failure is surfaced
            # to the user as a fallback message rather than a crash.
            print(e)
            return "系统繁忙，请稍后再试！"
        else:
            return super().print_text_answer(completion)