from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
from peft import PeftModel
import torch
import logging
import threading

# Model paths: base model directory and the LoRA adapter trained on top of it.
model_name_or_path = "./Qwen2.5-7B-Instruct"
lora_model_path = "./qwen-liucixin-lora"

# Lazily-initialized singletons, populated by initialize_model().
tokenizer = None
model = None

def initialize_model():
    """Lazily load the tokenizer, the base model, and the LoRA adapter.

    Idempotent: each component is created only if its module-level global
    is still None, so repeated calls are cheap no-ops.
    """
    global tokenizer, model
    if tokenizer is None:
        logging.info("Initializing tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
        # Qwen tokenizers may ship without a pad token; reuse EOS so that
        # padding in predict() works.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        logging.info("Tokenizer initialized.")

    if model is None:
        logging.info("Initializing model...")
        model = AutoModelForCausalLM.from_pretrained(
            model_name_or_path,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True
        )
        model = PeftModel.from_pretrained(model, lora_model_path)
        model.eval()
        # NOTE: no model.to("cuda") here — with device_map="auto" the model
        # is already dispatched by accelerate, and calling .to() on a
        # dispatched model raises a ValueError (and would break any
        # CPU/disk-offloaded placement).
        logging.info("Model initialized.")


def predict(prompt):
    """Stream generated text for *prompt*, yielding decoded chunks.

    Runs model.generate() in a background thread and yields incremental
    text pieces from a TextIteratorStreamer as they become available.

    Args:
        prompt: The raw text prompt to feed the model.

    Yields:
        str: Successive fragments of the generated completion
        (prompt and special tokens are skipped).
    """
    global tokenizer, model
    if tokenizer is None or model is None:
        initialize_model()

    # Place inputs on the model's device instead of hard-coding "cuda":
    # with device_map="auto" the embedding layer may not be on cuda:0.
    inputs = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device)

    # TextIteratorStreamer makes the generated tokens consumable from
    # this (the caller's) thread while generate() runs in the background.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    generation_kwargs = dict(
        **inputs,
        # max_new_tokens bounds only the completion; max_length would count
        # the prompt too and could leave no room to generate.
        max_new_tokens=500,
        do_sample=True,
        temperature=0.8,
        top_p=0.95,
        top_k=40,
        repetition_penalty=1.1,
        # Explicit pad id avoids the per-call warning (pad == eos here).
        pad_token_id=tokenizer.pad_token_id,
        streamer=streamer
    )

    # Run generate() in a background thread; iterating the streamer below
    # blocks until tokens arrive and ends when generation finishes.
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    try:
        for new_text in streamer:
            yield new_text
    finally:
        # Always reap the worker thread, even if the consumer abandons
        # the generator early.
        thread.join()

if __name__ == '__main__':
    # Configure logging BEFORE loading the model so that the INFO messages
    # emitted by initialize_model() are actually visible (the root logger
    # defaults to WARNING with no handler).
    logging.basicConfig(level=logging.INFO)
    # Eagerly load the model inside the __main__ guard only: importing this
    # module must not trigger a multi-GB model load. predict() still
    # lazy-initializes for library use.
    initialize_model()
    while True:
        prompt = input("请输入提示词 q退出")
        if prompt == "q":
            break
        for text in predict(prompt=prompt):
            print(text, end='', flush=True)
        print()
