import torch
import torch_npu# 华为NPU的PyTorch扩展

# Select which NPU to use ("npu:0" .. "npu:7", depending on installed hardware).
torch_device = "npu:1"
# Make the chosen NPU the current PyTorch device.
torch.npu.set_device(torch.device(torch_device))
# Disable just-in-time compilation on the NPU.
torch.npu.set_compile_mode(jit_compile=False)

# Exclude the "Tril" operator from fuzzy compilation.
# NOTE(review): the original code repeated this option block and the
# transformers import twice verbatim; the duplicate has been removed.
option = {}
option["NPU_FUZZY_COMPILE_BLACKLIST"] = "Tril"
torch.npu.set_option(option)
from transformers import AutoModelForCausalLM, AutoTokenizer

# Default path of the pretrained checkpoint to load.
DEFAULT_CKPT_PATH = '../dir1'

# Load the causal-LM weights in half precision, then move the model to the
# NPU and switch it to inference (eval) mode.
model = AutoModelForCausalLM.from_pretrained(
    DEFAULT_CKPT_PATH,
    torch_dtype=torch.float16,
)
model = model.npu()
model = model.eval()

# Load the tokenizer that matches the checkpoint.
tokenizer = AutoTokenizer.from_pretrained(DEFAULT_CKPT_PATH)
# Interactive chat loop: read a prompt, generate a reply, print it.
# Type "exit" to quit. Each turn is stateless (no conversation history kept).
while True:
    prompt = input("user:")
    if prompt == "exit":
        break
    # Wrap the user turn in the chat schema expected by apply_chat_template.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    # Render the messages into the model's chat prompt format as plain text,
    # appending the generation prompt so the model continues as the assistant.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    # Tokenize and move the input tensors to the NPU selected above.
    model_inputs = tokenizer([text], return_tensors="pt").to(torch_device)

    # Generate up to 512 new tokens.
    # FIX: forward **model_inputs (input_ids AND attention_mask) instead of
    # only input_ids — omitting the attention mask triggers the pad-token
    # ambiguity warning and can produce wrong output if the batch is padded.
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
    )
    # Strip the prompt tokens so only the newly generated reply remains.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    # Decode the generated token IDs back into text.
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    print("Qwen2-7B-Instruct:",response)
