import time
from chatllm.chatio import ChatIO
import gradio as gr
import os,gc

import torch.cuda

import utils.CommonUtils as CommonUtils
from utils.CommonUtils import parse_text
from  utils.contants  import DIR_EMBEDDING_MODELS,DIR_MODEL,DIR_LORA,MODEL_CACHE_PATH,EMBEDDING_DEVICE,PROMET_TEMPLATE,LLM_DEVICE,num_gpus
from langchain.vectorstores import FAISS
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.document_loaders import UnstructuredFileLoader
from utils.chinese_text_splitter import ChineseTextSplitter
from chatllm.chatllm import Parameters
from inspect import isgenerator

import sentence_transformers


# Labels for the answer-mode radio button: knowledge-base retrieval vs. plain LLM chat.
answer_mode_knowledge = "知识库"
answer_mode_normal = "普通模式"

class KnowledgeBasedChatLLM:
    """Facade bundling an LLM chat backend with an optional embedding model
    for knowledge-base (FAISS) retrieval.

    The module below uses a single shared instance; state therefore lives in
    these class-level attributes.
    """

    chatIO: ChatIO = None      # lazily-created LLM chat backend
    embeddings: object = None  # HuggingFaceEmbeddings instance, or None when unloaded
    prepared: bool = False     # True once init_model_config has completed

    def unload(self):
        """Drop the embedding model reference so its memory can be reclaimed."""
        self.embeddings = None

    def init_model_config(self, large_language_model, lora_model, embedding_model, onload):
        """(Re)load the LLM, an optional LoRA adapter and an optional embedding model.

        Args:
            large_language_model: directory name of the LLM under DIR_MODEL.
            lora_model: directory name of a LoRA adapter under DIR_LORA
                (empty/"--" to skip, per CommonUtils.is_model_empty).
            embedding_model: directory name of the embedding model under
                DIR_EMBEDDING_MODELS (empty to skip).
            onload: progress callback ``onload(percent, message)``.

        Returns:
            A human-readable summary of what was loaded.
        """
        if self.chatIO is None:
            self.chatIO = ChatIO()
            # Parse the `deploy` env var as a boolean. The original code used
            # eval(), which executes arbitrary code taken from the environment;
            # a plain string comparison is safe and equivalent for the expected
            # "True"/"False" values.
            deploy_mode = os.getenv('deploy', 'True')
            self.chatIO.deployMode = deploy_mode.strip().lower() in ("true", "1")
            print("llm deploy mode:", self.chatIO.deployMode)
        # Unload the current LLM and embedding model before reloading.
        self.chatIO.unload()
        self.unload()
        # Force GC and release any cached GPU memory.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

        model_ret = "模型:" + large_language_model
        self.chatIO.model_name_or_path = os.path.abspath(
            os.path.join(DIR_MODEL, large_language_model))
        if not CommonUtils.is_model_empty(lora_model):
            self.chatIO.lora_model_name_or_path = os.path.abspath(
                os.path.join(DIR_LORA, lora_model))
            model_ret += "\nLoRA:" + lora_model
        self.chatIO.load_model(llm_device=LLM_DEVICE, onload=onload)

        if not CommonUtils.is_model_empty(embedding_model):
            onload(0.6, "正在加载文本向量模型:" + embedding_model)
            self.embeddings = HuggingFaceEmbeddings(
                model_name=os.path.abspath(
                    os.path.join(DIR_EMBEDDING_MODELS, embedding_model)))
            self.embeddings.client = sentence_transformers.SentenceTransformer(
                self.embeddings.model_name,
                device=EMBEDDING_DEVICE,
                cache_folder=os.path.join(MODEL_CACHE_PATH,
                                          self.embeddings.model_name))
            model_ret += "\nEmbedding:" + embedding_model
        self.prepared = True
        return model_ret

    def init_knowledge_vector_store(self, filepath, onload):
        """Vectorize *filepath* into a FAISS index persisted at 'faiss_index'.

        Args:
            filepath: path of the knowledge file to index.
            onload: progress callback ``onload(percent, message)``.

        Returns:
            The in-memory FAISS vector store (also saved to disk).
        """
        print("load file:", filepath)
        onload(0, "load file:" + os.path.basename(filepath))
        docs = self.load_file(filepath)
        print("FAISS Document....")
        onload(0.5, "FAISS Document....")
        vector_store = FAISS.from_documents(docs, self.embeddings)
        print("FAISS Index Save....")
        onload(1, "FAISS Index Save....")
        vector_store.save_local('faiss_index')
        return vector_store

    def load_file(self, filepath):
        """Load and split one document into LangChain documents.

        Markdown is loaded element-wise without splitting; PDFs and all other
        formats are split with the Chinese-aware text splitter.
        """
        lowered = filepath.lower()
        if lowered.endswith(".md"):
            loader = UnstructuredFileLoader(filepath, mode="elements")
            docs = loader.load()
        elif lowered.endswith(".pdf"):
            loader = UnstructuredFileLoader(filepath)
            docs = loader.load_and_split(ChineseTextSplitter(pdf=True))
        else:
            loader = UnstructuredFileLoader(filepath, mode="elements")
            docs = loader.load_and_split(text_splitter=ChineseTextSplitter(pdf=False))
        return docs

    def get_knowledge_based_answer(self, query, top_k: int = 6,
                                   temperature: float = 0.01, top_p: float = 0.1,
                                   history=None, max_token_length=512,
                                   knowledge_template=PROMET_TEMPLATE):
        """Answer *query* using the top-k documents retrieved from the local
        FAISS index as context.

        Returns whatever chatIO.generate produces (a string, or a generator of
        incremental strings when streaming).
        """
        # `history=None` avoids the shared-mutable-default pitfall of `history=[]`.
        history = [] if history is None else history
        vector_store = FAISS.load_local('faiss_index', self.embeddings)
        retriever = vector_store.as_retriever(search_kwargs={"k": top_k})
        docs = retriever.get_relevant_documents(query)
        params = Parameters(
            temperature=temperature,
            top_p=top_p,
            max_token_length=max_token_length,
            history=history,
            prompt=query,
            knowledge=docs,
            knowledge_template=knowledge_template
        )
        return self.chatIO.generate(params)

    def get_llm_answer(self, query, temperature: float = 0.01, top_p: float = 0.1,
                       history=None, max_token_length=512):
        """Answer *query* directly with the LLM, without knowledge retrieval."""
        # `history=None` avoids the shared-mutable-default pitfall of `history=[]`.
        history = [] if history is None else history
        params = Parameters(
            temperature=temperature,
            top_p=top_p,
            max_token_length=max_token_length,
            history=history,
            prompt=query,
            knowledge=None,
            knowledge_template=None
        )
        return self.chatIO.generate(params)





# Module-level singleton shared by all Gradio event handlers below.
knowladge_based_chat_llm = KnowledgeBasedChatLLM()

def refresh_model_list():
    """Rescan the model directories and rebuild the three dropdowns.

    Returns Gradio update objects for the LLM, LoRA and embedding dropdowns.
    The first available LLM is preselected; LoRA and embedding start blank
    (LoRA offers "--" as an explicit "no adapter" choice).
    """
    llm_choices = CommonUtils.childrenDir(DIR_MODEL)
    lora_choices = ["--", *CommonUtils.childrenDir(DIR_LORA)]
    embedding_choices = [*CommonUtils.childrenDir(DIR_EMBEDDING_MODELS)]
    default_llm = llm_choices[0] if len(llm_choices) > 0 else ""

    return (gr.Dropdown.update(choices=llm_choices, value=default_llm),
            gr.Dropdown.update(choices=lora_choices, value=""),
            gr.Dropdown.update(choices=embedding_choices, value=""))


def reinit_model(large_language_model, lora_model, embedding_model, progress=gr.Progress()):
    """Gradio handler: reload the selected LLM / LoRA / embedding models.

    Raises gr.Error when no LLM is selected. Returns the load-summary text
    shown in the model-state textbox.
    """
    if CommonUtils.is_model_empty(large_language_model):
        raise gr.Error("请选择大语言模型")

    def onload(percent, message):
        # Forward model-loading progress into the Gradio progress bar.
        progress(percent, message)

    return knowladge_based_chat_llm.init_model_config(
        large_language_model=large_language_model,
        lora_model=lora_model,
        embedding_model=embedding_model,
        onload=onload)


def init_vector_store(file_obj, progress=gr.Progress()):
    """Gradio handler: vectorize the uploaded knowledge file into the FAISS index.

    Args:
        file_obj: the uploaded gr.File object (has a ``.name`` temp path).
        progress: Gradio progress tracker (injected by the framework).

    Raises:
        gr.Error: when no file was uploaded or the embedding model is not loaded.

    Returns:
        A completion message for the status textbox.
    """
    if file_obj is None:
        raise gr.Error("请先上传知识库文件")
    if knowladge_based_chat_llm.embeddings is None:
        raise gr.Error("尚未加载模型,请先加载模型")

    def onload(percent, message):
        # Forward vectorization progress into the Gradio progress bar.
        progress(percent, message)

    # The FAISS index is persisted to disk inside init_knowledge_vector_store;
    # the returned store object itself is not needed here (dropped the unused
    # local from the original).
    knowladge_based_chat_llm.init_knowledge_vector_store(file_obj.name, onload)
    return "向量化完成"



def predict(message, chatbot, answer_mode, knowledge_template, top_k, top_p,
            history_len, temperature, max_token_length, history):
    """Gradio streaming handler for both chat modes.

    Yields (chatbot, history, "") triples; the trailing empty string clears
    the input textbox. When the backend returns a generator, partial answers
    are streamed; otherwise the full answer is yielded once.
    """
    # Guard clauses: empty input, or required models not loaded yet.
    if message is None or message == "":
        yield chatbot, history, ""
        return
    if not knowladge_based_chat_llm.prepared:
        yield [(parse_text(message), "模型尚未加载,请先加载模型")], [], ""
        return
    if answer_mode == answer_mode_knowledge and knowladge_based_chat_llm.embeddings is None:
        yield [(parse_text(message), "文本向量模型尚未加载,请先加载")], [], ""
        return

    # Keep only the most recent turns of context.
    history = history[-history_len:] if history_len > 0 else []
    print("history_len:", len(history))
    chatbot.append((parse_text(message), ""))

    if answer_mode == answer_mode_knowledge:
        generate = knowladge_based_chat_llm.get_knowledge_based_answer(
            query=message, knowledge_template=knowledge_template,
            top_k=top_k, top_p=top_p, temperature=temperature,
            history=history, max_token_length=max_token_length)
    else:
        generate = knowladge_based_chat_llm.get_llm_answer(
            query=message, top_p=top_p, temperature=temperature,
            history=history, max_token_length=max_token_length)

    # A plain string answer is treated as a one-element stream so both
    # cases share the same yield loop.
    responses = generate if isgenerator(generate) else [generate]
    for reps in responses:
        chatbot[-1] = (parse_text(message), parse_text(reps))
        history = chatbot[-history_len:] if history_len > 0 else []
        yield chatbot, history, ""


def clear_session():
    """Reset the chat UI: empty chatbot display and empty history state."""
    empty_chat = []
    empty_history = []
    return empty_chat, empty_history




# Top-level Gradio UI definition. Left column: model selection, knowledge-file
# upload/vectorization, and generation parameters. Right column: the chat
# window. Event handlers are wired up at the bottom of the block.
with gr.Blocks(title="推理", css="#fixed_size_img {height: 240px;}") as chatbotApp:
    gr.Markdown("""<h1><center>ChatLLM可视化Demo</center></h1>
              <center><font size=3>
              本项目基于LangChain和大型语言模型系列模型, 提供基于本地知识的自动问答应用. 
              提醒：<br>
                 知识不能包含某些特殊字符，否则将返回error.
              </center></font>
              """)
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Accordion("模型选择"):
                # Dropdowns start empty; refresh_model_list() populates them.
                large_language_model = gr.Dropdown(choices=[],label="大语言模型",value="",interactive=True)
                lora_model = gr.Dropdown(choices=[], label="LoRA模型", value="",interactive=True)
                embedding_model = gr.Dropdown(choices=[],label="文本向量模型",value="",interactive=True)
                refresh_model_btn = gr.Button("刷新模型列表")
                load_model_button = gr.Button("重新加载模型")
            with gr.Accordion("知识库文件"):
                file = gr.File(label='请上传知识库文件',
                               file_types=['.txt', '.md', '.docx', '.pdf'])
                file_state = gr.Textbox(label="向量化结果",placeholder="")
                gr.Examples( [["examples/knowledge_example.md"]],inputs=file)
                init_vs = gr.Button("知识库文件向量化")

            with gr.Accordion("模型参数配置"):
                answer_mode = gr.Radio(label="问答模式", choices=[answer_mode_knowledge,answer_mode_normal],
                                                       value=answer_mode_normal)
                history_len = gr.Slider(minimum=0, maximum=8, value=3, step=1, label="保留历史对话轮数",
                                        interactive=True)
                top_k = gr.Slider(minimum=1,maximum=10,value=1,step=1,label="vector search top k",interactive=True)
                top_p = gr.Slider(minimum=0,maximum=1, value=0.9,step=0.1, label="top_p",interactive=True)
                temperature = gr.Slider(minimum=0, maximum=1, value=0.01,  step=0.01,label="temperature", interactive=True)
                max_token_length = gr.Slider(minimum=256, maximum=2048, value=512, step=1, label="max_token_length", interactive=True)
                knowledge_template = gr.TextArea(label="知识库问答模板",value=PROMET_TEMPLATE)



        with gr.Column(scale=4):
            model_state_box = gr.Textbox(value="模型尚未加载", label="模型加载状态")
            chatbot = gr.Chatbot([],
                                 label='ChatLLM').style(height=450)
            # Per-session conversation history, threaded through predict().
            history = gr.State([])
            message = gr.Textbox(label='请输入问题')
            with gr.Row():
                send = gr.Button("🚀 发送")
                clear_history = gr.Button("🧹 清除历史对话")

    # Rescan the model directories and refresh the three dropdowns.
    refresh_model_btn.click(refresh_model_list, inputs=[],
                            outputs=[large_language_model, lora_model, embedding_model])

    # (Re)load the selected LLM / LoRA / embedding models.
    load_model_button.click(reinit_model, show_progress=True,
                            inputs=[large_language_model, lora_model, embedding_model], outputs=[model_state_box])
    # Vectorize the uploaded knowledge file into the FAISS index.
    init_vs.click(init_vector_store, show_progress=True, inputs=[file], outputs=[file_state])

    # Message send: both the send button and pressing Enter stream the answer
    # through predict(); the empty-string output clears the input textbox.
    send.click(predict,inputs=[message,chatbot,answer_mode,knowledge_template,top_k,top_p,history_len,temperature,max_token_length,history],outputs=[chatbot,history,message], show_progress=True)
    message.submit(predict,inputs=[message,chatbot,answer_mode,knowledge_template,top_k,top_p,history_len,temperature,max_token_length,history],outputs=[chatbot,history,message],show_progress=True)
    clear_history.click(fn=clear_session,inputs=[],outputs=[chatbot, history], queue=False)