import os
import shutil

import gradio as gr
from ConversationChain import prompt_Structure
from ChatGLM import ChatGLM


def delete_file(_file=None, path="C:\\Users\\33668\\PycharmProject\\GLM_Main\\database1\\add"):
    """Wipe the upload-cache directory and recreate it empty.

    Args:
        _file: Ignored. Present because Gradio's ``clear`` event passes the
            File component's value positionally; the original zero-argument
            signature raised a TypeError when that event actually fired.
        path: Directory to wipe; defaults to the project's ``add`` cache dir.
    """
    print("删除中")  # "deleting..."
    # ignore_errors tolerates a missing directory so the handler is
    # idempotent; recreate it so later uploads have somewhere to land.
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)
    print("删除成功")  # "deleted successfully"


class gradio_interface:
    """Gradio chat UI around a ChatGLM retrieval-augmented conversation chain.

    Presents a chatbot with generation-parameter sliders (top-p, temperature,
    max generation tokens) and a file tab; uploaded documents are copied into
    a cache directory and indexed into the retrieval store on the fly.
    """

    # Cache directory where user uploads are copied before indexing.
    ADD_DIR = "C:\\Users\\33668\\PycharmProject\\GLM_Main\\database1\\add"
    # Pre-built knowledge base loaded once at startup.
    DATABASE_DIR = "C:\\Users\\33668\\PycharmProject\\GLM_Main\\database1\\database"

    def __init__(self):
        """Build the LLM, wire it into the prompt chain and load the knowledge base."""
        self.structure = prompt_Structure()
        self.llm = ChatGLM()
        self.chain = self.structure.get_chain(self.llm)
        self.structure.load_new(self.DATABASE_DIR)
        self.history = []

    def interface(self):
        """Assemble the Blocks layout, wire up the events and launch the app.

        Blocking: ``launch()`` does not return until the server stops.
        """
        with gr.Blocks() as demo:
            history = gr.State([])
            user_question = gr.State("")
            with gr.Row(equal_height=True):
                with gr.Column(scale=5):
                    with gr.Row():
                        chatbot = gr.Chatbot(elem_id="chatbot", height=500)
                    with gr.Row():
                        with gr.Column(scale=12):
                            user_input = gr.Textbox(
                                show_label=False, placeholder="Enter text", container=False
                            )
                        with gr.Column(min_width=70, scale=1):
                            submitBtn = gr.Button("Send")
                with gr.Column():
                    with gr.Column(min_width=50, scale=1):
                        with gr.Tab(label="Parameter Setting"):
                            gr.Markdown("# Parameters")
                            top_p = gr.Slider(
                                minimum=0,
                                maximum=1.0,
                                value=0.95,
                                step=0.05,
                                interactive=True,
                                label="Top-p",
                            )
                            temperature = gr.Slider(
                                minimum=0.1,
                                maximum=2.0,
                                value=1,
                                step=0.1,
                                interactive=True,
                                label="Temperature",
                            )
                            max_length_tokens = gr.Slider(
                                minimum=0,
                                maximum=2048,
                                value=512,
                                step=8,
                                interactive=True,
                                label="Max Generation Tokens",
                            )
                        with gr.Tab(label="input_File"):
                            input_file = gr.File()

            # Run the chain on the staged question and render the new history.
            predict_args = dict(
                fn=self.predict,
                inputs=[user_question, history, top_p, temperature, max_length_tokens],
                outputs=[chatbot],
                show_progress=True,
            )
            # Move the textbox content into the user_question state and clear the box.
            transfer_input_args = dict(
                fn=self.transfer_input,
                inputs=[user_input],
                outputs=[user_question, user_input, submitBtn],
                show_progress=True,
            )
            # Pressing Enter or clicking Send both stage the input, then predict.
            user_input.submit(**transfer_input_args).then(**predict_args)
            submitBtn.click(**transfer_input_args).then(**predict_args)

            input_file.upload(fn=self.new_loader_file, inputs=input_file)
            # inputs=None: delete_file only wipes the cache directory. The
            # original passed the File component to a zero-argument callback,
            # which raised a TypeError whenever the clear event fired.
            input_file.clear(fn=delete_file, inputs=None)

        demo.queue(concurrency_count=1).launch()

    def predict(self, text, history, top_p, temperature, max_length_tokens):
        """Generate a reply for *text* and append the (question, answer) pair.

        Returns the (mutated) history list, which Gradio renders in the chatbot.
        """
        # Push the current slider values onto the model before generating.
        self.llm.top_p = top_p
        self.llm.max_length = max_length_tokens
        self.llm.temperature = temperature
        res = self.chain(text)
        history.append((text, res['response']))
        return history

    def transfer_input(self, inputs):
        """Stage the typed question and reset the textbox in one update.

        Returning everything at once keeps UI latency low: the question
        state, the cleared textbox and the (still visible) Send button are
        updated together. (The original also called ``reset_textbox`` and
        discarded the result — dead code, removed.)
        """
        return (
            inputs,
            gr.update(value=""),
            gr.Button.update(visible=True),
        )

    def reset_textbox(self):
        """Return updates that blank the textbox (helper for manual resets)."""
        return gr.update(value=""), ""

    def loader_file(self, file):
        """Legacy upload handler: copy *file* into the cache and re-index it.

        Superseded by :meth:`new_loader_file`, which the UI actually wires up.
        """
        print("用户上传了文件")  # "user uploaded a file"
        # os.path.join fixes the original concatenation, which glued the
        # directory and the filename together without a path separator.
        dest = os.path.join(self.ADD_DIR, os.path.basename(file.name))
        with open(file.name, "r", encoding="utf-8") as src:
            with open(dest, "w", encoding="utf-8") as dst:
                dst.write(src.read())
        self.structure.load_new(self.ADD_DIR)
        print("上传成功")  # "upload succeeded"

    def new_loader_file(self, file):
        """Upload handler: copy *file* into the cache dir and re-index the store.

        Args:
            file: Gradio file wrapper; ``file.name`` is the temp path on disk.
        """
        print("用户上传了文件")  # "user uploaded a file"
        with open(file.name, "r", encoding="utf-8") as src:
            content = src.read()
        dest = os.path.join(self.ADD_DIR, os.path.basename(file.name))
        with open(dest, "w", encoding="utf-8") as dst:
            dst.write(content)
        self.structure.load_new(self.ADD_DIR)

if __name__ == "__main__":
    # Launch the UI only when executed as a script; importing this module
    # (e.g. for tests) must not construct the model or start the server.
    gradio_interface().interface()