from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
import torch
from moe_train import LLM, Config
from torch_npu.contrib import transfer_to_npu
torch.npu.set_compile_mode(jit_compile=False)


# Inference demo: load the tokenizer and the custom MoE causal LM, register
# the custom config/model classes with the transformers Auto* factories, then
# generate replies for a few Chinese prompts on the NPU.
t = AutoTokenizer.from_pretrained('/media/nvme1n1_dist/tokenizer_demo/model/')
AutoConfig.register("moe_model", Config)
AutoModelForCausalLM.register(Config, LLM)
model = AutoModelForCausalLM.from_pretrained('./saves/moe')
model.npu().eval()

prompts = ["描述一本经济学专业书籍的内容和特点。",
           "撰写一篇关于管理沟通的文章，包括沟通原则和管理技巧。",
           "编写一段向上司介绍公司新产品的宣传文案。"]
# NOTE: loop variable renamed from `str`, which shadowed the builtin.
for prompt in prompts:
    # apply_chat_template returns the encoded token ids for the chat turn
    # (NOTE(review): no add_generation_prompt flag is passed — confirm the
    # template appends the assistant prefix by default).
    input_data = t.apply_chat_template([{'role': 'user', 'content': prompt}])
    print(input_data)

    # Pure inference: disable autograd bookkeeping around generation.
    # model.generate here is the custom LLM API from moe_train (batch dict,
    # eos id, max new tokens, then sampling kwargs), not the HF signature.
    with torch.no_grad():
        for token in model.generate(
                {"input_ids": torch.tensor(input_data).unsqueeze(0).npu(),
                 "labels": None},
                t.eos_token_id, 512, stream=False, temperature=0.8, top_k=5):
            print(t.decode(token[0]))