import os
from threading import Thread
from transformers import TextIteratorStreamer
import gradio as gr
from openmind import AutoModelForCausalLM, AutoTokenizer
import torch

# Point the OpenMind hub at the modelers.cn endpoint so that the model
# weights below are downloaded from that registry.
os.environ["OPENMIND_HUB_ENDPOINT"] = "https://modelers.cn"

def load_model():
    """Load the fine-tuned cat-maid chat model and its tokenizer.

    Downloads (or reuses the cached) ``Qwen3_14B_cat_maid`` checkpoint,
    moves the model to the first NPU device, and returns both objects.

    Returns:
        tuple: ``(model, tokenizer)`` ready for inference on ``"npu:0"``.
    """
    model_path = "toolsmanhehe/Qwen3_14B_cat_maid"
    tok = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    mdl = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    )
    # bfloat16 weights on the Ascend NPU, matching the fine-tuning setup.
    return mdl.to("npu:0"), tok

def chat(content, history):
    """Stream a model response for ``content`` given the prior ``history``.

    Args:
        content: The user's new message.
        history: Prior turns as Gradio supplies them — a list of
            ``[user_message, bot_message]`` pairs. Any non-list value
            (e.g. ``None`` on the first turn) is treated as empty.

    Yields:
        str: The partial response so far, growing as tokens stream in.
    """
    # BUG FIX: the original referenced `list_history` even when `history`
    # was not a list, raising NameError. Normalize to an empty list instead.
    pairs = history if isinstance(history, list) else []

    # BUG FIX: the original converted each pair into a {"role", "content"}
    # dict and then unpacked the dicts in a 2-tuple loop — iterating a dict
    # yields its *keys*, so every past turn was rendered literally as
    # "用户：role\n助手：content". Build the prompt straight from the pairs.
    prompt = ""
    for user_msg, bot_msg in pairs:
        prompt += f"用户：{user_msg}\n助手：{bot_msg}\n"
    prompt += f"用户：{content}\n助手："

    # `model` and `tokenizer` are module-level globals set in __main__.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    generation_kwargs = {
        "input_ids": inputs["input_ids"],
        "attention_mask": inputs["attention_mask"],
        "max_new_tokens": 512,
        # BUG FIX: without do_sample=True, transformers ignores
        # temperature/top_p and generates greedily.
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.9,
        "streamer": streamer,
    }

    # Run generation in a background thread so this generator can yield
    # tokens to the UI as the streamer receives them.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    buffer = ""
    for token in streamer:
        buffer += token
        yield buffer

    # Ensure the generation thread has fully finished before returning.
    thread.join()

if __name__ == "__main__":
    # Load the model and tokenizer once at startup; `chat` reads these
    # as module-level globals.
    model, tokenizer = load_model()

    # Build the streaming chat UI, then serve it with debug logging on.
    demo = gr.ChatInterface(
        fn=chat,
        title="欢迎光临猫娘咖啡厅",
        description="本案例使用基于qwen3模型，500条猫娘数据集微调后的模型实现。",
        examples=["宝宝我有点小困了捏（摸摸宝宝的头)", "宝宝！（十分委屈的看着宝宝，哭泣）"],
    )
    demo.launch(debug=True)
