import gc
from threading import Thread

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from peft import PeftModel


# Local checkout paths of the base (full-weight) models, keyed by the label
# shown in the model dropdown.
# NOTE(review): paths are relative to the working directory — confirm they
# exist wherever this script is launched from.
base_model_dict = {
    'seeledu': '../../jinndata/数据代码/huggingface.co/seeledu/Chinese-Llama-2-7B',
    'FlagAlpha': '../../jinndata/数据代码/huggingface.co/FlagAlpha/Atom-7B',
    'LinkSoul': '../../jinndata/数据代码/huggingface.co/LinkSoul/Chinese-Llama-2-7b'
}

# LoRA adapter paths for the same three models; defined but not referenced
# anywhere else in this file (presumably for a future PeftModel.from_pretrained
# call — see the unused `peft` import).
lora_model_dict = {
    'seeledu': 'JinnData/数据代码/huggingface.co/seeledu/Chinese-Llama-2-7B',
    'FlagAlpha': 'JinnData/数据代码/huggingface.co/FlagAlpha/Atom-7B',
    'LinkSoul': 'JinnData/数据代码/huggingface.co/LinkSoul/Chinese-Llama-2-7b'
}


# Module-level state for the currently loaded model; populated lazily by
# load_base_model() and read by send_query().
tokenizer = None
model = None
streamer = None

def load_base_model(select_model, select_lora):
    """Load the base model chosen in the UI, releasing any previous one.

    Args:
        select_model: Key into ``base_model_dict`` from the model dropdown.
        select_lora: Current *value* of the LORA dropdown.  In a Gradio event
            handler this is a plain string, not the component itself.

    Returns:
        Tuple ``(model_key, 'None')`` used to update the two dropdown
        components; the LORA selection is reset to ``'None'``.
    """
    global model
    global tokenizer
    global streamer

    # Release the previously loaded model before loading a new one.
    # Dropping the references alone is not enough to return GPU memory,
    # so force a collection and flush the CUDA caching allocator.
    if model is not None or tokenizer is not None:
        tokenizer = None
        model = None
        streamer = None
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    model_id_or_path = base_model_dict[select_model]
    tokenizer = AutoTokenizer.from_pretrained(model_id_or_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id_or_path,
        load_in_8bit=True,
        device_map='auto',
        torch_dtype=torch.float16,
        trust_remote_code=True,
    )
    model.eval()

    # Reset the LORA dropdown by returning its new value.  The original
    # `select_lora.update(value='None')` raised AttributeError: `select_lora`
    # is a str here, not the Dropdown component.
    return select_model, 'None'


def build_llama_prompt(instruction, history=None):
    """Build a Llama-style instruction prompt, optionally with chat history.

    Args:
        instruction: The current user instruction.
        history: Optional list of ``(human, assistant)`` message pairs from
            earlier rounds; ``None`` or ``[]`` means no history.

    Returns:
        The formatted prompt string, ready for tokenization.
    """
    if not history:
        prompt_template_no_history = (
            "Below is an instruction that describes a task. "
            "Write a response that appropriately completes the request.\n\n"
            "### Instruction:\n{instruction}\n\n### Response:"
        )
        return prompt_template_no_history.format(instruction=instruction)

    prompt_template = (
        "Below is an instruction that describes a task, paired with an chat history as a reference. "
        "Write a response that appropriately completes the request.\n\n"
        "### History:\n{history}\n\n### Instruction:\n{instruction}\n\n### Response:"
    )
    # Rounds are numbered from 1; `turn` avoids shadowing the builtin `round`.
    history_list = [
        "<s>[Round-{}]Human: {} </s> <s>Assistant: {} </s>".format(turn, human, assistant)
        for turn, (human, assistant) in enumerate(history, 1)
    ]
    history_text = '\n'.join(history_list)
    return prompt_template.format(instruction=instruction, history=history_text)


def send_query(chatbot, query_txt, history, temperature, top_p, max_length):
    """Generate a reply for *query_txt* and append it to the chat state.

    Args:
        chatbot: Gradio chatbot message list of ``(user, assistant)`` pairs.
        query_txt: The user's new prompt text.
        history: List of ``(user, assistant)`` pairs used to build the prompt;
            mutated in place.
        temperature: Sampling temperature from the UI slider.
        top_p: Nucleus-sampling threshold from the UI slider.
        max_length: Maximum number of new tokens to generate.

    Returns:
        ``(chatbot, textbox-clearing update, history)``.
    """
    global model
    global tokenizer
    global streamer

    prompt = build_llama_prompt(query_txt, history)
    model_input = tokenizer([prompt], return_tensors="pt", add_special_tokens=False).to('cuda')
    # The slider values were previously ignored (max_new_tokens hard-coded
    # to 100); wire them through to generation.  do_sample=True is required
    # for temperature/top_p to have any effect.
    output_ids = model.generate(
        **model_input,
        max_new_tokens=max_length,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Decode only the newly generated tokens — the raw output sequence
    # starts with an echo of the prompt.
    prompt_len = model_input['input_ids'].shape[1]
    response = tokenizer.decode(output_ids[0][prompt_len:], skip_special_tokens=True)
    history.append((query_txt, response))
    chatbot.append((query_txt, response))

    return chatbot, gr.update(value=""), history

# --- UI layout and event wiring -------------------------------------------
with gr.Blocks() as demo:
    with gr.Row():
        # Left column: conversation list (placeholder buttons for now).
        with gr.Column(scale=2, variant='panel'):
            gr.HTML("""<h2 align="center">对话记录</h2>""")
            for i in range(7):
                gr.Button(f"{i}")
            new_btn = gr.Button('新建', variant='primary')
        # Middle column: the chat itself.
        with gr.Column(scale=8, variant='panel'):
            gr.HTML("""<h2 align="center">对话测试</h2>""")
            chatbot = gr.Chatbot()
            with gr.Group():
                query_txt = gr.Textbox(placeholder='输入您的Prompt...', label='输入')
                submit_btn = gr.Button('提交', variant='primary')
            # TODO(review): these three buttons have no click handlers yet.
            with gr.Row():
                retry_btn = gr.Button('重试')
                recallmsg_btn = gr.Button('撤回')
                clear_btn = gr.Button('清除')
            gr.Examples(['1','2'], [ query_txt ], label='示例')
        # Right column: model selection, sampling controls, and feedback.
        with gr.Column(scale=2, variant='panel'):
            gr.HTML("""<h2 align="center">控制台</h2>""")
            with gr.Column(variant='panel'):

                select_model = gr.Dropdown(choices=['None'] + list(base_model_dict.keys()), value='None', label='模型')
                select_lora = gr.Dropdown(choices=['None'], value='None', label='LORA')
                # Fixed typo: choices previously read ['Defaut'], so the
                # default value 'Default' was not a valid choice.
                select_template = gr.Dropdown(choices=['Default'], value='Default',  label='Template')

                temperature = gr.Slider(0.0, 1.0, 0.5, label='Temperature')
                top_p = gr.Slider(0.0, 1.0, 0.5, label='Top P')
                max_length = gr.Slider(0, 2048, 512, step=128, label='MAX Length')

            with gr.Column(variant='panel'):
                gr.Radio(['不满意', '一般', '满意'], label='当前回答质量')
                gr.Radio(['不准确', '混淆', '准确'], label='当前回答准确性')

    # Per-session chat history of (user, assistant) pairs.
    history = gr.State([])
    select_model.select(load_base_model, [ select_model, select_lora ], [ select_model, select_lora ])
    query_txt.submit(send_query, [chatbot, query_txt, history, temperature, top_p, max_length], [chatbot, query_txt, history])
demo.queue().launch(server_name='0.0.0.0', server_port=8888)