import os
import re

import gradio as gr
import openai

from configs.params import ModelParams
from doc_search import ES
from doc_search_memory import VectorSearch
from fs import FileSys
from model.chatglm_llm import ChatLLM
# SECURITY(review): an OpenAI API key was hard-coded here and committed to
# source control -- that key must be treated as leaked and revoked. The
# OPENAI_API_KEY environment variable now takes precedence; the old literal
# is kept only as a backward-compatible fallback until it is rotated.
openai.api_key = os.getenv(
    "OPENAI_API_KEY",
    "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",
)

# Prompt template for knowledge-grounded QA. NOTE(review): currently unused --
# predict() builds its messages via prompt() instead.
PROMPT_TEMPLATE = """已知信息：
{context} 

根据上述已知信息，简洁和专业的来回答用户的问题。如果无法从中得到答案，请说 “根据已知信息无法回答该问题” 或 “没有提供足够的相关信息”，不允许在答案中添加编造成分，答案请使用中文。 问题是：{question}"""

model_config = ModelParams()
# In-memory vector retrieval backend; the Elasticsearch variant is kept as a
# drop-in alternative:
# es = ES(model_config.embedding_model)
es = VectorSearch(model_config.embedding_model)
# Local ChatGLM backend (disabled -- answers come from the OpenAI API):
# llm = ChatLLM()
# llm.load_llm()


def clear_session():
    """Reset the chat UI: clear the input box, the history, and the doc panel."""
    cleared_input = ''
    cleared_history = []
    cleared_docs = ''
    return cleared_input, cleared_history, cleared_docs


def search_doc(question, search_method, top_k, knn_boost, threshold):
    """Query the vector store and optionally drop low-scoring hits.

    A threshold of 0 (or below) disables filtering and returns every hit;
    otherwise only hits whose 'score' strictly exceeds the threshold are kept.
    """
    hits = es.doc_search(method=search_method, query=question,
                         top_k=top_k, knn_boost=knn_boost)
    if threshold <= 0:
        return hits
    return [hit for hit in hits if hit['score'] > threshold]


def doc_format(doc_list):
    """Render retrieved documents as a human-readable text block.

    Args:
        doc_list: iterable of dicts, each with 'title' and 'content' keys.

    Returns:
        One string with a "title:/content:" stanza per document; this is what
        the document-interaction panel in the UI displays.
    """
    # ''.join over a generator instead of repeated '+=' (linear, not quadratic);
    # the dead commented-out "source/score" variant was removed.
    return ''.join(
        f"\ntitle: {doc['title']}\ncontent: {doc['content']}\n"
        for doc in doc_list
    )

def predict(question, search_method, top_k, max_token, temperature, top_p, knn_boost, history, history_length,
            threshold):
    """Answer a user question with GPT-3.5, grounded in retrieved documents.

    Args:
        question: user's question text.
        search_method / top_k / knn_boost / threshold: forwarded to search_doc().
        max_token / temperature / top_p: OpenAI sampling parameters.
        history: Gradio state, list of [question, answer] pairs (mutated in place).
        history_length: unused here -- it belonged to the disabled local
            ChatGLM backend and is kept only for interface compatibility.

    Returns:
        (history, history, formatted_search_result, "") so Gradio updates the
        chatbot, the state, the document panel, and clears the input box.
    """
    search_res = search_doc(question, search_method, top_k, knn_boost, threshold)
    search_result = doc_format(search_res)

    # Bug fix: an `informed_context` string was previously accumulated in two
    # consecutive loops (duplicating every document) and then never used at
    # all; only the truncated snippets handed to prompt() matter.
    # Slicing handles both the long and short case: s[:300] == s when len <= 300.
    answers = [{'text': doc['content'][:300]} for doc in search_res]

    gpt_answer = openai.ChatCompletion.create(
        temperature=temperature,
        model="gpt-3.5-turbo",
        messages=prompt(question, answers),
        max_tokens=max_token,
        top_p=top_p,
    )
    gpt_answer = gpt_answer.choices[0].message.content

    history.append([question, gpt_answer])
    return history, history, search_result, ""
    

def prompt(question, answers):
    """Build the chat-completion message list for the OpenAI API.

    The conversation primes the model with a one-shot example (demo question
    and answer about cold-vs-allergy symptoms) so it replies in the same
    "answer from these passages" format, then poses the real question with
    the retrieved passages appended as a numbered list.

    Args:
        question: the user's question.
        answers: list of dicts, each with a 'text' key holding a passage snippet.

    Returns:
        A list of {'role': ..., 'content': ...} dicts in OpenAI chat format:
        system instruction, example user turn, example assistant turn, and
        finally the real user turn.
    """
    # One-shot example: user turn with passages + the assistant's model answer.
    demo_q = '使用以下段落来回答问题："成人头疼，流鼻涕是感冒还是过敏？"\n1. 普通感冒：您会出现喉咙发痒或喉咙痛，流鼻涕，流清澈的稀鼻涕（液体），有时轻度发热。\n2. 常年过敏：症状包括鼻塞或流鼻涕，鼻、口或喉咙发痒，眼睛流泪、发红、发痒、肿胀，打喷嚏。'
    demo_a = '成人出现头痛和流鼻涕的症状，可能是由于普通感冒或常年过敏引起的。如果病人出现咽喉痛和咳嗽，感冒的可能性比较大；而如果出现口、喉咙发痒、眼睛肿胀等症状，常年过敏的可能性比较大。'
    # System message steers the model to mimic the example answer style.
    system = '你是一个机器人, 参考之前的回答方式，对提出的问题进行回答'
    # Real user turn: the question followed by the numbered passages.
    q = '使用以下段落来回答问题，如果段落内容不相关就生成相关的内容："'
    q += question + '"'
    q += ''.join(f"{index + 1}. {answer['text']}\n"
                 for index, answer in enumerate(answers))
    return [
        {'role': 'system', 'content': system},
        {'role': 'user', 'content': demo_q},
        {'role': 'assistant', 'content': demo_a},
        {'role': 'user', 'content': q},
    ]



if __name__ == "__main__":
    # Markdown heading shown at the top of the Gradio page.
    title = """
    # 基于GPT的智能个人知识库
    """
    with gr.Blocks() as demo:
        gr.Markdown(title)

        # Left column: chatbot + input box; right column: retrieved-document panel.
        with gr.Row():
            with gr.Column(scale=2):
                chatbot = gr.Chatbot()
                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=4, container=False)
                with gr.Row():
                    submitBtn = gr.Button("Submit", variant="primary")
                    emptyBtn = gr.Button("Clear History")
            search_out = gr.Textbox(label="文档交互", lines=25, max_lines=25, interactive=False, scale=1)

        with gr.Row(variant='compact'):
            # LLM sampling controls forwarded to the OpenAI chat-completion call.
            with gr.Column():
                gr.Markdown("""LLM设置""")
                max_length = gr.Slider(0, 4097, value=2048, step=1.0, label="Maximum length", interactive=True)
                top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
                temperature = gr.Slider(0, 1, value=0.01, step=0.01, label="Temperature", interactive=True)
                # NOTE(review): wired into predict() but unused by the current
                # OpenAI-backed implementation (ChatGLM leftover).
                history_length = gr.Slider(0, 10, value=3, step=1, label="history_length", interactive=True)

            # Retrieval controls: method, score threshold, top-k, knn weighting.
            with gr.Column():
                gr.Markdown("""查询设置""")
                search_method = gr.Radio(['近似查询', '混合查询', '精确查询'],
                                         value='精确查询',
                                         label="Search Method")
                threshold = gr.Number(label="查询阈值(0为不设限)", value=0.00, interactive=True)
                top_k = gr.Slider(0, 10, value=3, step=1.0, label="top_k", interactive=True)
                knn_boost = gr.Slider(0, 1, value=0.5, step=0.1, label="knn_boost", interactive=True)

            # Knowledge-base management: chunked ingestion into the vector
            # store plus raw file storage via FileSys.
            with gr.Column():
                gr.Markdown("""知识库管理""")
                file = gr.File(label='请上传知识库文件', file_types=['.txt', '.md', '.doc', '.docx'])
                chunk_size = gr.Number(label="chunk_size", value=300, interactive=True)
                chunk_overlap = gr.Number(label="chunk_overlap", value=10, interactive=True)
                doc_upload = gr.Button("存入知识库")
                # Raw files are stored on the file system (see FileSys handlers below).
                upload_btn = gr.Button("上传文件")
                download_btn = gr.Button("下载文件")
                getlist_btn = gr.Button("获取文件列表")

        # Per-session chat history: list of [question, answer] pairs.
        history = gr.State([])

        submitBtn.click(predict,
                        inputs=[user_input, search_method, top_k, max_length, temperature, top_p, knn_boost, history,
                                history_length, threshold],
                        outputs=[chatbot, history, search_out, user_input]
                        )
        # Chunk the uploaded file and index it into the vector store.
        doc_upload.click(
            fn=es.doc_upload,
            show_progress=True,
            inputs=[file, chunk_size, chunk_overlap],
            outputs=[search_out],
        )

        emptyBtn.click(fn=clear_session, inputs=[], outputs=[chatbot, history, search_out], queue=False)


        upload_btn.click(fn=FileSys.save_file, inputs=[file], outputs=[search_out], queue=False)
        download_btn.click(fn=FileSys.get_file, inputs=[], outputs=[search_out], queue=False)
        getlist_btn.click(fn=FileSys.get_file_list, inputs=[], outputs=[search_out], queue=False)

    # Listens on all interfaces, port 8000; link sharing disabled.
    demo.queue().launch(share=False, inbrowser=True, server_name="0.0.0.0", server_port=8000)
