import os
import logging
from configs.model_config import *
from prompt_generate.models import PromptGen
from llm.models import LargeLanguageModel
from stable_diffusion.image_generate import text_2_image
# Directory where generated images live: <project root>/img.
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
IMG_PATH = os.path.join(_PROJECT_ROOT, "img")

logger = logging.getLogger()

# Maximum number of past Q/A turns folded into the LLM prompt.
LLM_HISTORY_LEN = 3


class ChatBot:
    """Route chat turns to a Stable Diffusion image pipeline or an LLM.

    A turn whose user text starts with ``IMAGE_PREFIX`` ("/image") goes
    through prompt rewriting + ``text_2_image``; every other turn is
    answered by the large language model, with up to ``llm_history_len``
    previous Q/A pairs folded into the prompt.  Call :meth:`init_cfg`
    before :meth:`chat`.
    """

    # NOTE(review): these class attributes are never read — init_cfg stores
    # the loaded models on self.prompt / self.llm instead.  Kept for
    # backward compatibility with any external code that inspects them.
    llm_model: object = None
    prompt_model: object = None

    # Prefix that switches a turn onto the image-generation flow.
    IMAGE_PREFIX = "/image"

    def init_cfg(
        self,
        prompt_model: str = PROMPT_MODEL,
        prompt_device=PROMPT_DEVICE,
        llm_history_len: int = LLM_HISTORY_LEN,
        llm_model: str = LLM_MODEL,
        llm_device=LLM_DEVICE,
    ):
        """Load the prompt-rewriting model and the LLM.

        Args:
            prompt_model: key into ``prompt_model_dict`` (configs.model_config).
            prompt_device: target device for the prompt model.
                TODO(review): currently unused — confirm whether PromptGen
                should receive it.
            llm_history_len: max number of past Q/A pairs used per prompt.
            llm_model: key into ``llm_model_dict``.
            llm_device: target device for the LLM.
                TODO(review): currently unused (carried over from original).
        """
        self.prompt = PromptGen(model_path=prompt_model_dict[prompt_model])
        self.llm_history_len = llm_history_len
        self.llm = LargeLanguageModel(model_path=llm_model_dict[llm_model])

    def chat(self, chat_history):
        """Answer the newest turn of *chat_history* in place and return it.

        Args:
            chat_history: list of ``[user_text, bot_response]`` pairs; the
                last pair's response slot is filled by this call.

        Returns:
            The same ``chat_history`` list, with the last response set to
            either an LLM string, a ``(file_path, "image")`` tuple, or an
            error message string.
        """
        question = chat_history[-1][0]
        # Guard against a None user slot: the history filter below already
        # anticipates None entries, so the dispatcher must not crash on one.
        if isinstance(question, str) and question.startswith(self.IMAGE_PREFIX):
            resp = self._image_flow(question[len(self.IMAGE_PREFIX):])
        else:
            resp = self._llm_flow(question, chat_history[:-1])
        chat_history[-1][1] = resp
        return chat_history

    def _image_flow(self, raw_prompt):
        """Rewrite *raw_prompt* and run Stable Diffusion.

        Returns a ``(file_path, "image")`` tuple on success or an
        English error string on failure.
        """
        logger.info(" ***** 用户请求了 sd 模型 *****")
        print(" ***** 用户请求了 sd 模型 *****")
        logger.info("用户原始提示词：%s", raw_prompt)
        print("用户原始提示词：{}".format(raw_prompt))
        new_prompt = self.prompt.process(raw_prompt)
        logger.info("生成的新提示词为：%s", new_prompt)
        print("生成的新提示词为：{}".format(new_prompt))
        logger.info("调用 sd 模型，请稍后……")
        print("调用 sd 模型，请稍后……")
        try:
            file_path = text_2_image(new_prompt)
            # Guard the falsy/None path too — os.path.exists(None) raises.
            if file_path and os.path.exists(file_path):
                resp = (file_path, "image")
            else:
                resp = "Image generation failed. Please try again later!"
        except Exception:
            # logger.exception keeps the traceback; the old print(e) lost it.
            logger.exception("text_2_image failed")
            resp = "The image generation service cannot be accessed. Please contact the administrator!"
        logger.info("已生成图像，添加到聊天历史中")
        print("已生成图像，添加到聊天历史中")
        return resp

    def _llm_flow(self, question, history):
        """Build a history-aware prompt for *question* and query the LLM."""
        logger.info(" ***** 用户请求了 llm 模型 ***** ")
        print(" ***** 用户请求了 llm 模型 ***** ")
        logger.info("参考历史构建完整提示")
        print("参考历史构建完整提示")
        prompt = self._build_prompt(question, history)
        logger.info("最终提示为\n%s", prompt)
        print("最终提示为\n{}".format(prompt))
        logger.info("调用 llm 模型，请稍后……")
        print("调用 llm 模型，请稍后……")
        resp = self.llm.process(prompt)
        logger.info("已获取应答，添加到聊天历史中")
        print("已获取应答，添加到聊天历史中")
        return resp

    def _build_prompt(self, question, history):
        """Fold up to ``self.llm_history_len`` usable Q/A pairs into a prompt.

        Pairs are taken most-recent-first; image turns (user text starting
        with the image prefix), None user entries, and non-string responses
        are skipped.  With no history at all, the bare question is returned.
        """
        if not history:
            return question
        parts = []
        for user_text, bot_text in reversed(history):
            if (
                user_text is None
                or user_text.startswith(self.IMAGE_PREFIX)
                or not isinstance(bot_text, str)
            ):
                continue
            parts.append("\nQuestion: {}\nAnswer: {}\n".format(user_text, bot_text))
            if len(parts) >= self.llm_history_len:
                break
        # Pairs were collected newest-first; restore chronological order
        # and join once instead of repeated string prepending.
        parts.reverse()
        return "".join(parts) + "\nQuestion: {}\nAnswer: ".format(question)
