# Startup banner (Chinese: "Initializing, please be patient...").
print("正在初始化，请耐心等候……")
# Used below to time the generation call.
from datetime import datetime

# Hugging Face components for loading the tokenizer/model and running text generation.
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline

import torch

# Ascend NPU backend; importing it presumably registers the "npu" device
# used below — NOTE(review): must be imported after torch, verify on target box.
import torch_npu

# Local MindLLM-1b3 chat checkpoint and the target Ascend NPU device.
model_path = '/home/linweibin/lwb2/model/v1/MindLLM-1b3-chat-zh-v2.0'
device = 'npu:0'

print(f"{device}")

tokenizer = AutoTokenizer.from_pretrained(model_path)
# BUG FIX: Hugging Face tokenizers cap sequence length via `model_max_length`;
# the original assigned to a nonexistent `max_length` attribute, which had no
# effect on tokenization.
tokenizer.model_max_length = 1024

model = AutoModelForCausalLM.from_pretrained(model_path).to(device)

generator = TextGenerationPipeline(model=model, tokenizer=tokenizer, device=device)

# Single-turn chat prompt in the model's role-tag format
# (Chinese: "What are Jay Chou's most representative songs?").
context = "<user>\n周杰伦有哪些代表作歌曲?\n<assistant>\n"

t1 = datetime.now()
# BUG FIX: repetition_penalty must be > 1.0 to *discourage* repetition —
# values below 1.0 reward repeated tokens, which contradicted the
# no_repeat_ngram_size=5 anti-repetition setting. 1.5 is a common choice.
outputs = generator(
    context,
    max_new_tokens=1024,
    do_sample=True,
    num_beams=8,
    repetition_penalty=1.5,
    no_repeat_ngram_size=5,
    return_full_text=False,
)
print("<assistant>: " + outputs[0]['generated_text'] + "\n\n")
t2 = datetime.now()
second = (t2 - t1).total_seconds()
# "耗时" = "elapsed time" (seconds, includes printing the answer above).
print(f"耗时{second}")


# Removed: a module-level triple-quoted string holding two commented-out
# duplicates of the generation/timing calls above (dead code that also
# referenced stale `t2`/`t3` variables). To benchmark repeatedly, wrap the
# generator call above in a loop instead of uncommenting copies.