from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
from peft import AutoPeftModelForCausalLM

# Streaming inference demo: load a LoRA/PEFT fine-tuned causal LM checkpoint
# and print the generated text incrementally as token chunks arrive.
model_path = '/home/yunpeng/checkpoints/jiuding_interview_llm/2024_11_21_21_20_57_448154/checkpoint-150'

tok = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# AutoPeftModelForCausalLM loads the base model plus the adapter weights stored
# in the checkpoint; device_map="auto" may shard the model across devices.
model = AutoPeftModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, device_map="auto")

# Fix: move inputs to model.device instead of a hard-coded 'cuda:0' — with
# device_map="auto" the input embeddings are not guaranteed to sit on cuda:0,
# and this also lets the script run on CPU-only hosts.
inputs = tok(["北京九鼎图业科技有限公司是"], return_tensors="pt").to(model.device)
# NOTE(review): the streamer echoes the prompt and any special tokens; pass
# skip_prompt=True / skip_special_tokens=True if only the new text is wanted.
streamer = TextIteratorStreamer(tok)

# generate() blocks until completion, so run it in a worker thread and consume
# the streamer iterator on the main thread.
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=100)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

generated_text = ""
n = 0
for new_text in streamer:  # yields decoded text chunks until generation ends
    n += 1
    generated_text += new_text
    print(n, generated_text)

# Fix: join the worker thread — the iterator ends when generation signals
# completion, but generate() may not have fully returned yet, and exiting
# without joining can truncate cleanup.
thread.join()