from modelscope import  AutoTokenizer,AutoModelForCausalLM
import gradio as gr
from loguru import logger

def load_model(model_dir="./Qwen-1_8B-Chat-Int4"):
    """Load the Qwen tokenizer and chat model into module globals.

    Args:
        model_dir: Local path (or ModelScope model id) of the quantized
            Qwen checkpoint. Defaults to the original hard-coded path, so
            existing zero-argument callers are unaffected.

    Side effects:
        Sets the module-level ``tokenizer`` and ``model`` globals that
        ``predict`` reads.
    """
    global tokenizer, model
    tokenizer = AutoTokenizer.from_pretrained(model_dir, revision='master', trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_dir, revision='master',
        # Let accelerate place layers on available GPU(s)/CPU automatically.
        device_map="auto",
        trust_remote_code=True,
    ).eval()  # inference mode: disables dropout/batchnorm training behavior

def predict(input, chatbot, history):
    """Run one chat turn as a Gradio generator callback.

    Args:
        input: The user's message text (name shadows the builtin, kept for
            interface compatibility with the existing click binding).
        chatbot: List of (user, bot) message pairs backing the Chatbot widget.
        history: Model-side conversation history (gr.State list).

    Yields:
        (chatbot, history) tuples — first with an empty placeholder reply so
        the UI shows the user's message immediately, then with the model's
        final response.
    """
    chatbot.append((input, ""))
    # Yield the placeholder right away; without this early yield the
    # placeholder appended above would never be visible, since the single
    # final yield already contains the full response.
    yield chatbot, history
    # `model` and `tokenizer` are module globals set by load_model().
    response, history = model.chat(tokenizer, input, history)
    chatbot[-1] = (input, response)
    yield chatbot, history

def reset_user_input():
    """Return a Gradio update that clears the input textbox."""
    cleared = gr.update(value='')
    return cleared

# def reset_state():
#     model.clean_history()
#     return []

if __name__ == "__main__":
    # Load model/tokenizer globals before wiring the UI callbacks.
    load_model()
    with gr.Blocks() as demo:
        gr.HTML("""<h1 align="center">qwen1.8b</h1>""")
        chatbot = gr.Chatbot()
        with gr.Row():
            with gr.Column(scale=4):
                with gr.Column(scale=12):
                    user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10)
                with gr.Column(min_width=32, scale=1):
                    submit_btn = gr.Button("Submit", variant="primary")

        # Per-session model-side conversation history threaded through predict().
        history = gr.State([])

        # Two independent click handlers: predict streams into the chatbot,
        # while the textbox is cleared immediately without waiting on the model.
        submit_btn.click(predict, [user_input, chatbot, history], [chatbot, history], show_progress=True)
        submit_btn.click(reset_user_input, [], [user_input])

    # queue() is required for generator callbacks; 0.0.0.0 exposes the app
    # on all interfaces (LAN-visible) — NOTE(review): confirm this is intended.
    demo.queue().launch(share=False, inbrowser=True, server_name='0.0.0.0')