import gradio as gr
import baize_rag
import copy
import pathlib
import jieba
import debug
import baize_gp_util
import baize_config

# Parse CLI arguments (inbrowser / server_name / server_port — consumed by
# demo.launch() at the bottom of this file).
args = debug.get_args()
# Load the LLM and its tokenizer once at startup; reused for every chat turn.
model, tokenizer = debug._load_model_tokenizer(args)
# Count of user messages appended since the last bot() run; bot() consumes
# history[-new_msg:] and resets this counter to 0.
new_msg = 0


class BaizeEasyDB:
    """Minimal in-memory text store with bag-of-words retrieval.

    Chunks are plain strings; :meth:`query` returns the single stored chunk
    sharing the most jieba tokens with the query string.
    """

    def __init__(self):
        # Flat list of text chunks; no persistence, no indexing.
        self.database = []

    def spilt_append(self, text, max_length=128):
        """Split *text* into chunks of at most *max_length* chars and store them.

        (Method name kept as-is — "spilt" — for backward compatibility with
        existing callers.) An empty *text* adds nothing.
        """
        # NOTE: removed a stray no-op `len(text)` statement that was here.
        for start in range(0, len(text), max_length):
            self.database.append(text[start : start + max_length])

    def append(self, texts: list):
        """Store every string in *texts* as its own chunk."""
        self.database.extend(texts)

    def query(self, _query) -> str:
        """Return the stored chunk with the largest jieba-token overlap with *_query*.

        Returns "" when the store is empty; ties keep the earliest chunk.
        """
        best_chunk = ""
        best_overlap = -1
        query_tokens = set(jieba.cut(_query))
        for chunk in self.database:
            overlap = len(set(jieba.cut(chunk)).intersection(query_tokens))
            if overlap > best_overlap:
                best_overlap = overlap
                best_chunk = chunk
        return best_chunk

    def clear(self):
        """Drop every stored chunk."""
        self.database.clear()


# Shared module-level singletons: the retrieval store and the Whisper-based
# speech-to-text engine (model path comes from project config).
db = BaizeEasyDB()
s2t = baize_rag.Speech2Text(baize_config.DEFAULT_WHISPER_MODEL_PATH)

def add_message(history, message):
    """Append the user's files and text to *history* and lock the input box.

    The special text ":clear" empties the retrieval store instead of being
    added to the chat. `new_msg` counts the entries appended here so bot()
    knows how many trailing history items are still unanswered.
    """
    global new_msg

    if message["text"] == ":clear":
        db.clear()
        return history, gr.MultimodalTextbox(value=None, interactive=False)

    for file_path in message["files"]:
        # Files are stored as 1-tuples; bot() distinguishes them from text
        # via isinstance(user_input, str).
        history.append(((file_path,), None))
        new_msg += 1
    # Truthiness check: an empty string (file-only submission) must not create
    # a blank chat bubble — the previous `is not None` test let it through.
    if message["text"]:
        new_msg += 1
        history.append((message["text"], None))

    return history, gr.MultimodalTextbox(value=None, interactive=False)


def bot(history):
    """Process the unanswered trailing history entries and stream replies.

    Generator used as a gradio callback: it yields *history* after every
    update so the UI refreshes incrementally. Handles three input kinds:
      * URL text   -> fetch the page and index its text into the RAG db
      * other text -> answer via the LLM, augmented with the best-matching
                      stored chunk when the db is non-empty
      * file tuple -> extract text from PDF / media files into the db
    """
    global new_msg
    historycopy = copy.deepcopy(history[-new_msg:])
    print("historycopy", new_msg, historycopy)
    new_msg = 0

    for user_input, llm_output in historycopy:
        if isinstance(user_input, str):
            # URL: fetch and index the web page instead of chatting.
            if user_input.startswith("http"):
                _url = user_input
                history.append([None, "**正在读取网页** " + _url])
                title, html_content = baize_rag.get_webpage(_url)
                history.append(
                    [
                        None,
                        f"**网页内容**\n{title}\n```text\n{html_content}\n```\n**请问您有什么问题吗？**",
                    ]
                )
                db.spilt_append(html_content)
                yield history
            # Plain text: one chat turn, retrieval-augmented when possible.
            else:
                _query = user_input
                print("query", _query)
                # Placeholder bubble that the streamed answer fills in.
                history.append([None, ""])

                ref = db.query(_query)
                if ref:
                    # Prepend the retrieved chunk as context for the LLM.
                    # (Fixed a duplicated word: was "以下知识知识".)
                    _query = f"请利用以下知识：```{ref}```\n进行总结，回答我的问题：{_query}"
                print(_query)

                # Retrieved knowledge is single-use: drop it once it has been
                # woven into the prompt.
                db.clear()

                # Stream the answer character-by-character into the bubble.
                for character in baize_gp_util.chat_stream(
                    model, tokenizer, _query, []
                ):
                    history[-1][1] += character
                    yield history

        else:
            # File upload: user_input is a 1-tuple holding the file path.
            try:
                _file = user_input[0]
                path = pathlib.Path(_file)
                file_type = path.suffix
                text_to_add = ""
                if file_type == ".pdf":
                    history.append([None, "**正在处理文档**"])
                    document_text = baize_rag.pdf2text(path)
                    history.append([None, f"```text\n{document_text}\n```"])
                    text_to_add = document_text
                elif file_type in (".mp4", ".mp3", ".wav"):
                    # Tuple membership: the previous substring test
                    # (`file_type in ".mp4.mp3.wav"`) wrongly matched "" and
                    # partial suffixes such as ".mp".
                    history.append([None, "**正在处理媒体数据**"])
                    speech_text = s2t.transcribe(path)
                    history.append([None, f"```text\n{speech_text}\n```"])
                    text_to_add = speech_text
                else:
                    history.append([None, "**不受支持的格式**"])
                # Unsupported formats leave text_to_add == "", which is a no-op.
                db.spilt_append(text_to_add)
            except Exception as e:
                # Best-effort: surface the error in the chat instead of crashing.
                history.append([None, str(e)])

        print(db.database)
    yield history


# Gradio UI layout: title banner, chatbot pane, multimodal input box, and the
# submit -> add_message -> bot -> re-enable-input event chain.
with gr.Blocks(
    theme=gr.themes.Soft(),
) as demo:
    # App title / subtitle banner (Baize: local Q&A + multimodal RAG assistant
    # for RISC-V and openKylin).
    gr.HTML(
        """
<h1>白泽</h1>面向RISC-V和openkylin的本地问答、多模态知识检索增强生成助手
"""
    )

    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        label=None,
        show_label=False,
        bubble_full_width=False,
        height="60vh",
        placeholder="<h1 style='color: #AAAAAA;'>输入感兴趣的话题，或 文档、视频、音频、网页URL</h1>",
    )
    # NOTE(review): defined but never read or written below — possibly vestigial.
    task_history = gr.State([])

    # Single input box accepting both text and file attachments.
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        placeholder="输入感兴趣的话题，或 文档、视频、音频、网页URL",
        show_label=False,
    )

    # Submit chain: append user message(s), then stream the bot response, then
    # re-enable the (temporarily locked) input box.
    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input], [chatbot, chat_input]
    )
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    # License / acceptable-use notice for the Qwen1.5 weights.
    gr.HTML(
        """\
白泽的LLM使用阿里通义千问Qwen1.5（Qwen2Beta）开源的权重，使用请遵守Qwen1.5的许可协议限制。用户不应传播及不应允许他人传播包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。"""
    )

    # chatbot.like(print_like_dislike, None, None)


# Queueing is required for generator callbacks (bot streams partial replies).
demo.queue()
if __name__ == "__main__":
    # Server options come from the CLI arguments parsed at module import.
    demo.launch(
        inbrowser=args.inbrowser,
        server_name=args.server_name,
        server_port=args.server_port,
    )
