import os
from typing import Any

from openai import OpenAI
from rasa_sdk.executor import CollectingDispatcher

from .llm_api import LLMAPI


class Gpt4API(LLMAPI):
    """GPT-4 backend that streams chat completions to a Rasa dispatcher.

    Conversations are primed so the model answers in the first-person voice
    of "Jangar" (江格尔), hero of the Mongolian epic *Jangar*.  Replies are
    streamed chunk by chunk through a ``CollectingDispatcher``.
    """

    def __init__(self, api_key: "str | None" = None):
        """Create the OpenAI client.

        Args:
            api_key: explicit API key.  Falls back to the ``OPENAI_API_KEY``
                environment variable when omitted.

        SECURITY: a previous revision hard-coded a live key in source; that
        key is compromised and must be revoked.  Never commit secrets.
        """
        super().__init__()
        self.api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
        self.client = OpenAI(api_key=self.api_key)
        # Role used for the scripted acknowledgment turns in the priming
        # context.  The OpenAI chat API expects prior model turns to use the
        # "assistant" role; "system" is reserved for instructions.
        self.system_name = "assistant"

    def ask(self, events: list[dict[str, Any]], dispatcher: CollectingDispatcher) -> None:
        """Answer the conversation in Jangar's voice, streaming the output.

        Args:
            events: Rasa tracker events used to rebuild the chat history
                (via the inherited ``generate_context``).
            dispatcher: sink that receives each streamed text chunk.
        """
        base_require = "现在你是蒙古族英雄史诗《江格尔》中的主人公\"江格尔\"，你需要以\"江格尔\"的第一人称和口吻来与我进行对话。"
        base_answer = "好的，我现在将使用江格尔的第一人称和口吻来进行对话。"
        base_context = [
            {"role": "user", "content": base_require},
            {"role": self.system_name, "content": base_answer},
        ]
        context = self.generate_context(events, base_context)
        self.send_message(context, dispatcher)

    def optimize_response(self,
                          events: list[dict[str, Any]],
                          source_response: str,
                          dispatcher: CollectingDispatcher) -> None:
        """Restyle *source_response* in Jangar's voice (≤200 chars) and stream it.

        Only the most recent event is kept as context; the raw response is
        appended as the final user message for the model to rephrase.

        Args:
            events: Rasa tracker events; only the last one is used.
            source_response: the original text to be rewritten.
            dispatcher: sink that receives each streamed text chunk.
        """
        # events[-1:] yields [] for an empty list instead of raising
        # IndexError the way events[len(events) - 1] did.
        temp_events = events[-1:]
        base_require = "请你以江格尔的口吻来说我发给你的句子并精简到200字以内。注意我发给你的句子所描述的人物可能不是江格尔本人。"
        base_answer = "好的，我会尽力以江格尔的口吻来说你发给我的句子。"
        base_context = [
            {"role": "user", "content": base_require},
            {"role": self.system_name, "content": base_answer},
        ]
        context = self.generate_context(temp_events, base_context)
        context.append({"role": "user", "content": source_response})
        self.send_message(context, dispatcher)

    def send_message(self, messages: list[dict[str, str]], dispatcher: CollectingDispatcher) -> None:
        """Stream a GPT-4 completion, uttering each text chunk via *dispatcher*.

        A final empty message with ``attachment="finish"`` signals
        end-of-stream to the frontend.
        """
        completion = self.client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            stream=True,
        )
        for chunk in completion:
            choice = chunk.choices[0]
            if choice.finish_reason is not None:
                dispatcher.utter_message(text="", attachment="finish")
                break
            # Streaming deltas may carry content=None (e.g. role-only
            # chunks); skip those instead of uttering a null message.
            if choice.delta.content:
                dispatcher.utter_message(text=choice.delta.content)

    def test(self, message: str) -> None:
        """Console smoke test: send *message* un-primed and print the reply."""
        context = [{"role": "user", "content": message}]
        self.test_send_message(context)

    def test_send_message(self, messages: list[dict[str, str]]) -> None:
        """Stream a GPT-4 completion and print each chunk to stdout (debug aid)."""
        completion = self.client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            stream=True,
        )
        for chunk in completion:
            choice = chunk.choices[0]
            if choice.finish_reason is not None:
                break
            # Guard against None deltas so "None" is never printed.
            if choice.delta.content is not None:
                print(choice.delta.content, end='')