# pip install accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import torch_npu# 华为NPU的PyTorch扩展

torch_device = "npu:3" # NPU device index -- change to match your hardware setup
torch.npu.set_device(torch.device(torch_device)) # make this NPU the current PyTorch device
torch.npu.set_compile_mode(jit_compile=False) # disable just-in-time operator compilation on the NPU

# NPU compile options: exclude the "Tril" operator from fuzzy (shape-generalized)
# compilation, which can miscompile or slow down this op on some CANN versions.
option = {}
option["NPU_FUZZY_COMPILE_BLACKLIST"] = "Tril"
torch.npu.set_option(option)

# Load tokenizer and model from a local checkpoint directory, cast weights to
# bfloat16 (halves memory vs fp32), move to the NPU, and switch to eval mode
# so dropout/batch-norm layers behave deterministically during inference.
tokenizer = AutoTokenizer.from_pretrained("../gemma2") # change to your model path
model = AutoModelForCausalLM.from_pretrained(
    "../gemma2",
    torch_dtype=torch.bfloat16,
).npu().eval()

# Interactive chat loop: read a prompt, generate a completion, print only the
# newly generated text. Type "exit" to quit.
while True:
    input_text = input("user: ")
    if input_text == "exit":
        break
    # Tokenize the prompt and move the tensors to the NPU device.
    # (The result is a BatchEncoding holding input_ids / attention_mask.)
    inputs = tokenizer(input_text, return_tensors="pt").to(torch_device)

    outputs = model.generate(**inputs, max_new_tokens=132)
    # `generate` returns prompt + completion; decode only the tokens after the
    # prompt so the user's input is not echoed back, and strip special tokens
    # such as <bos>/<eos> from the printed reply.
    prompt_len = inputs["input_ids"].shape[-1]
    reply = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    print("Gemma2:", reply)
