from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import torch_npu  # 引入NPU支持的torch_npu库

# Local path to the MiniCPM checkpoint (HF format). NOTE(review): hard-coded
# machine-specific path — consider making this a CLI argument or env var.
MODEL_PATH = '/dev/shm/sunpeng/master/MindSpeed-LLM/model_from_hf/mincpm'


def main() -> None:
    """Load the MiniCPM model onto the NPU and print one chat response.

    Side effects: loads tokenizer/model from MODEL_PATH, runs generation on
    the NPU, and prints the response to stdout.
    """
    # Fixed seed so sampling (temperature/top_p) is reproducible across runs.
    torch.manual_seed(0)

    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

    # device_map={'': 'npu'} places the whole model on the NPU at load time,
    # so no extra .to('npu') call is needed afterwards.
    # float32 is used here; fp16/bf16 may be faster on NPU — verify support
    # for this model before switching.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        torch_dtype=torch.float32,
        device_map={'': 'npu'},
        trust_remote_code=True,  # .chat() is defined by the checkpoint's remote code
    )
    # Inference mode: disable dropout etc.; no_grad avoids building autograd
    # graphs during generation.
    model.eval()

    with torch.no_grad():
        # .chat() returns (response_text, updated_history); history is unused here.
        responds, history = model.chat(
            tokenizer,
            "请写一篇关于人工智能的文章，详细介绍人工智能的未来发展和隐患。",
            temperature=0.7,
            top_p=0.7,
        )

    print(responds)


if __name__ == "__main__":
    main()