from openai import OpenAI
from llamaapi import LlamaAPI
from env import LLAMA3_API_KEY
import re
from lib.tencent.hunyuan_lite import HunyuanLite
from lib.baidu.ernie_speed import ErnieSpeed


class Llama3:
    """ComfyUI node that asks an LLM to expand a subject/topic into a
    Stable Diffusion prompt.

    Two canned system prompts are available via the ``question`` input:
    ``"q1"`` (English, single-line prompt) and ``"q2"`` (Chinese; the
    model replies with ``<Prompt>``/``<NegativePrompt>`` tagged text that
    is parsed into separate outputs).  The active backend is Baidu
    ErnieSpeed (``send_request4``); the other ``send_requestN`` methods
    are alternative backends kept for reference.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Declare this node's input sockets for ComfyUI."""
        return {
            "required": {
                "content": ("STRING", {"multiline": True, "default": ""}),
                "question": (["q1", "q2", "q3"], {"default": "q1"}),
            },
            "optional": {
                "custom_question": (
                    "STRING",
                    {"default": ""},
                ),
            },
        }

    RETURN_TYPES = (
        "STRING",
        "STRING",
        "STRING",
        "STRING",
    )
    RETURN_NAMES = (
        "prompt",
        "negative prompt",
        "system_question",
        "question",
    )

    FUNCTION = "handle"

    OUTPUT_NODE = False

    CATEGORY = "fcl/prompt"

    # Canned system prompts, keyed by the "question" input value.
    # NOTE(review): "q3" is selectable in INPUT_TYPES but has no entry here,
    # so it falls back to an empty system message — confirm intended.
    question_dict = {
        "q1": "Act as a prompt maker with the following guidelines: - Break keywords by commas. - Provide high-quality, non-verbose, coherent, brief, concise, and not superfluous prompts. - Focus solely on the visual elements of the picture; avoid art commentaries or intentions. - Construct the prompt with the component format: 1. Start with the subject and keyword description. 2. Follow with scene keyword description. 3. Finish with background and keyword description. - Limit yourself to no more than 7 keywords per component - Include all the keywords from the user's request verbatim as the main subject of the response. - Be varied and creative. - Always reply on the same line and no more than 100 words long. - Do not enumerate or enunciate components. - Do not include any additional information in the response. The following is an illustrative example for you to see how to construct a prompt your prompts should follow this format but always coherent to the subject worldbuilding or setting and consider the elements relationship. Example: Subject: Demon Hunter, Cyber City. prompt: A Demon Hunter, standing, lone figure, glow eyes, deep purple light, cybernetic exoskeleton, sleek, metallic, glowing blue accents, energy weapons. Fighting Demon, grotesque creature, twisted metal, glowing red eyes, sharp claws, Cyber City, towering structures, shrouded haze, shimmering energy. Make a prompt for the following Subject: </INSTRUCTION>",
        "q2": """
        # Stable Diffusion prompt 助理

你来充当一位有艺术气息的Stable Diffusion prompt 助理。

## 任务

我用自然语言告诉你要生成的prompt的主题，你的任务是根据这个主题想象一幅完整的画面，然后转化成一份详细的、高质量的prompt，让Stable Diffusion可以生成高质量的图像。

## 背景介绍

Stable Diffusion是一款利用深度学习的文生图模型，支持通过使用 prompt 来产生新的图像，描述要包含或省略的元素。

## prompt 概念

- 完整的prompt包含“<Prompt></Prompt>和<NegativePrompt></NegativePrompt>"两部分。
- prompt 用来描述图像，由普通常见的单词构成，使用英文半角","做为分隔符。
- negative prompt用来描述你不想在生成的图像中出现的内容。
- 以","分隔的每个单词或词组称为 tag。所以prompt和negative prompt是由系列由","分隔的tag组成的。

## () 和 [] 语法

调整关键字强度的等效方法是使用 () 和 []。 (keyword) 将tag的强度增加 1.1 倍，与 (keyword:1.1) 相同，最多可加三层。 [keyword] 将强度降低 0.9 倍，与 (keyword:0.9) 相同。

## Prompt 格式要求

下面我将说明 prompt 的生成步骤，这里的 prompt 可用于描述人物、风景、物体或抽象数字艺术图画。你可以根据需要添加合理的、但不少于5处的画面细节。

### 1. prompt 要求

- 你输出的 Stable Diffusion prompt 以“<Prompt>”开头,以“</Prompt>”结尾。
- prompt 内容包含画面主体、材质、附加细节、图像质量、艺术风格、色彩色调、灯光等部分，但你输出的 prompt 不能分段，例如类似"medium:"这样的分段描述是不需要的，也不能包含":"和"."。
- 画面主体：不简短的英文描述画面主体, 如 A girl in a garden，主体细节概括（主体可以是人、事、物、景）画面核心内容。这部分根据我每次给你的主题来生成。你可以添加更多主题相关的合理的细节。
- 对于人物主题，你必须描述人物的眼睛、鼻子、嘴唇，例如'beautiful detailed eyes,beautiful detailed lips,extremely detailed eyes and face,longeyelashes'，以免Stable Diffusion随机生成变形的面部五官，这点非常重要。你还可以描述人物的外表、情绪、衣服、姿势、视角、动作、背景等。人物属性中，1girl表示一个女孩，2girls表示两个女孩。
- 材质：用来制作艺术品的材料。 例如：插图、油画、3D 渲染和摄影。 Medium 有很强的效果，因为一个关键字就可以极大地改变风格。
- 附加细节：画面场景细节，或人物细节，描述画面细节内容，让图像看起来更充实和合理。这部分是可选的，要注意画面的整体和谐，不能与主题冲突。
- 图像质量：这部分内容开头永远要加上“(best quality,4k,8k,highres,masterpiece:1.2),ultra-detailed,(realistic,photorealistic,photo-realistic:1.37)”， 这是高质量的标志。其它常用的提高质量的tag还有，你可以根据主题的需求添加：HDR,UHD,studio lighting,ultra-fine painting,sharp focus,physically-based rendering,extreme detail description,professional,vivid colors,bokeh。
- 艺术风格：这部分描述图像的风格。加入恰当的艺术风格，能提升生成的图像效果。常用的艺术风格例如：portraits,landscape,horror,anime,sci-fi,photography,concept artists等。
- 色彩色调：颜色，通过添加颜色来控制画面的整体颜色。
- 灯光：整体画面的光线效果。

### 2. negative prompt 要求
- negative prompt部分以"<NegativePrompt>"开头,以"</NegativePrompt>"结尾，你想要避免出现在图像中的内容都可以添加到"**Negative Prompt:**"后面。
- 任何情况下，negative prompt都要包含这段内容："nsfw,(low quality,normal quality,worst quality,jpeg artifacts),cropped,monochrome,lowres,low saturation,((watermark)),(white letters)"
- 如果是人物相关的主题，你的输出需要另加一段人物相关的 negative prompt，内容为：“skin spots,acnes,skin blemishes,age spot,mutated hands,mutated fingers,deformed,bad anatomy,disfigured,poorly drawn face,extra limb,ugly,poorly drawn hands,missing limb,floating limbs,disconnected limbs,out of focus,long neck,long body,extra fingers,fewer fingers,,(multi nipples),bad hands,signature,username,bad feet,blurry,bad body”。

### 3. 限制：
- tag 内容用英语单词或短语来描述，并不局限于我给你的单词。注意只能包含关键词或词组。
- 注意不要输出句子，不要有任何解释。
- tag数量限制40个以内，单词数量限制在60个以内。
- tag不要带引号("")。
- 使用英文半角","做分隔符。
- tag 按重要性从高到低的顺序排列。
- 我给你的主题可能是用中文描述，你给出的prompt和negative prompt只用英文。
        """,
    }
    # Last request/response cache: {"content": ..., "result": ...}.
    # Class-level default; handle()/question_2() rebind it on the instance so
    # the backend is only re-queried when the "content" input changes.
    history_dict = {}

    def handle(self, content, question="q1", custom_question=""):
        """Node entry point.

        Returns a 4-tuple (prompt, negative_prompt, system_question,
        question) matching RETURN_NAMES.  A non-empty ``custom_question``
        overrides the canned system prompt selected by ``question``.
        """
        # "q2" has its own flow (Chinese prompt + tagged-output parsing);
        # dispatch before building messages we would otherwise throw away.
        if question == "q2":
            return self.question_2(content, custom_question)

        system_message = custom_question or self.question_dict.get(question, "")
        messages = []
        if system_message != "":
            messages.append({"role": "system", "content": system_message})
        messages.append({"role": "user", "content": content})

        # Only hit the LLM when the input content actually changed.
        if self.history_dict.get("content", None) != content:
            result = self.send_request(messages)
            self.history_dict = {"content": content, "result": result}

        prompt = self.history_dict.get("result", "")
        negative_prompt = ""
        return (prompt, negative_prompt, system_message, content)

    def question_2(self, content, custom_question=""):
        """Handle the "q2" flow: prefix the topic with a Chinese lead-in,
        query the backend, then parse <Prompt>/<NegativePrompt> tags out of
        the reply."""
        content = "我有一个主题，" + content
        system_message = custom_question or self.question_dict.get("q2", "")
        messages = []
        if system_message != "":
            messages.append({"role": "system", "content": system_message})
        messages.append({"role": "user", "content": content})

        # Same change-detection cache as handle().
        if self.history_dict.get("content", None) != content:
            result = self.send_request(messages)
            self.history_dict = {"content": content, "result": result}

        result = self.history_dict.get("result", "")
        prompt = self._extract_tag(result, "Prompt")
        negative_prompt = self._extract_tag(result, "NegativePrompt")
        return (prompt, negative_prompt, system_message, content)

    @staticmethod
    def _extract_tag(text, tag):
        """Return the text between <tag> and </tag> (non-greedy, across
        newlines) with markdown '*' characters stripped, or "" when the tag
        pair is absent."""
        match = re.search(r"<{0}>(?P<body>[\s\S]*?)</{0}>".format(tag), text)
        if match is None:
            return ""
        return match.group("body").replace("*", "")

    def send_request(
        self, messages, api_key="", temperature=0.5, max_tokens=1024, top_p=1
    ):
        """Dispatch to the currently selected backend (Baidu ErnieSpeed)."""
        return self.send_request4(messages, "", temperature, max_tokens, top_p)

    def send_request1(
        self, messages, api_key="", temperature=0.5, max_tokens=1024, top_p=1
    ):
        """Alternative backend: llama3-70b via the OpenAI-compatible
        llama-api.com endpoint, streaming the reply to stdout while
        accumulating it.

        NOTE(review): the ``max_tokens`` parameter is ignored; the request
        is hard-coded to 2048 tokens.
        """
        client = OpenAI(base_url="https://api.llama-api.com", api_key=LLAMA3_API_KEY)
        print(messages)
        completion = client.chat.completions.create(
            model="llama3-70b",
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=1024 * 2,
            stream=True,
        )
        resp = []
        for chunk in completion:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                print(delta, end="")
                resp.append(delta)
        return "".join(resp)

    def send_request2(
        self, messages, api_key="", temperature=0.5, max_tokens=1024, top_p=1
    ):
        """Alternative backend: llama3-70b via the LlamaAPI SDK
        (non-streaming).

        NOTE(review): temperature/max_tokens/top_p are not forwarded to
        this backend.
        """
        llama = LlamaAPI(LLAMA3_API_KEY)
        api_request_json = {"model": "llama3-70b", "messages": messages}
        response = llama.run(api_request_json)
        resp = response.json()
        print(resp)
        choices = resp.get("choices", [])
        item = choices[0]  # raises IndexError if the API returned no choices
        print(item, item.get("message", {}).get("content", ""))
        return item.get("message", {}).get("content", "")

    def send_request3(
        self, messages, api_key="", temperature=0.5, max_tokens=1024, top_p=1
    ):
        """Alternative backend: Tencent HunyuanLite.

        BUGFIX: the original iterated the message *list* as if it were a
        dict (``for k in messages: val = messages[k]``), raising TypeError
        on every call; iterate the list items instead.
        """
        hyl = HunyuanLite()
        return hyl.getMessage(hyl.chat(messages=self._to_tencent_messages(messages)))

    @staticmethod
    def _to_tencent_messages(messages):
        """Map OpenAI-style {"role", "content"} dicts to Tencent's
        capitalized {"Role", "Content"} shape."""
        return [
            {"Role": m.get("role", "user"), "Content": m.get("content", "")}
            for m in messages
        ]

    def send_request4(
        self, messages, api_key="", temperature=0.5, max_tokens=1024, top_p=1
    ):
        """Active backend: Baidu ErnieSpeed.

        The system message is replayed as a user turn followed by a canned
        assistant acknowledgement — presumably because the Ernie endpoint
        expects alternating user/assistant turns without a system role
        (TODO confirm).  Only the first user message is forwarded.
        NOTE(review): temperature/max_tokens/top_p are not forwarded.
        """
        system_msg = {}
        user_msg = {}
        for val in messages:
            role = val.get("role", "user")
            if role == "system":
                system_msg = {"role": "user", "content": val.get("content", "")}
            if role == "user":
                user_msg = val
                break  # first user message wins

        t_msg = []
        # Skip empty placeholders instead of sending bare {} entries.
        if system_msg:
            t_msg.append(system_msg)
            t_msg.append({"role": "assistant", "content": "好的，请输入你的内容。"})
        if user_msg:
            t_msg.append(user_msg)

        es = ErnieSpeed()
        print(t_msg)
        return es.getMessage(es.chat(messages=t_msg))


# ComfyUI registration: node id -> implementing class.
NODE_CLASS_MAPPINGS = {"FCL_PROMPT": Llama3}

# ComfyUI registration: node id -> display label shown in the UI.
NODE_DISPLAY_NAME_MAPPINGS = {"FCL_PROMPT": "fcl dev prompt"}
