import gradio as gr
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import torch

# Local path to the model weights; raw string keeps the Windows backslashes literal.
model_id = r"E:\models\Qwen2.5-0.5B-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_id)
# Prefer the GPU when torch can see one; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Multi-turn chat history as a list of {"role": ..., "content": ...} dicts.
# NOTE(review): module-level state — shared across all Gradio sessions/users.
history = []


def question_answer(query):
    """Stream a chat reply for *query*, maintaining multi-turn history.

    Appends the user turn to the shared ``history``, runs ``model.generate``
    on a worker thread, and yields the progressively growing reply text so
    Gradio can render it incrementally. The final reply is appended to
    ``history`` as the assistant turn.

    Args:
        query: The user's question (plain text from the Gradio textbox).

    Yields:
        The accumulated reply text after each streamed chunk.
    """
    global history
    # Record the user's turn so the model sees the full conversation.
    history.append({"role": "user", "content": query})
    conversation = tokenizer.apply_chat_template(
        history, add_generation_prompt=True, tokenize=False
    )
    encoding = tokenizer(conversation, return_tensors="pt").to(device)
    # skip_prompt drops the echoed prompt at the source (the old
    # `new_text.replace(conversation, '')` was unreliable: the streamer
    # emits word-sized chunks, so the full prompt string rarely appears in
    # a single chunk, and the replace could eat legitimate output).
    # skip_special_tokens removes markers like <|im_end|>, including the
    # partially-streamed ones the old endswith() check let leak through.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generation_kwargs = dict(
        encoding, streamer=streamer, max_new_tokens=1000, do_sample=True, temperature=0.2
    )
    # generate() blocks until completion, so run it on a worker thread
    # while this generator consumes the streamer.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    generate_text = ""
    for new_text in streamer:
        generate_text += new_text
        yield generate_text
    # The streamer is exhausted only when generation finished; join to
    # release the worker thread deterministically.
    thread.join()
    # Record the assistant's turn for the next call.
    history.append({"role": "assistant", "content": generate_text})

# Minimal Gradio UI: one multi-line textbox in, streamed text out.
# Because question_answer is a generator, Gradio streams each yield to the
# output pane.
demo = gr.Interface(
    fn=question_answer,
    inputs=gr.Textbox(lines=3, placeholder="your question...", label="Question"),
    outputs="text",
)

# Starts the local web server (blocking call).
demo.launch()