'''
Author: SUNNY
Description: 
Date: 2025-11-24 16:28:22
'''
from modelscope import AutoModelForCausalLM, AutoTokenizer
import torch
import time

# Measure elapsed time with perf_counter: monotonic and high-resolution,
# unlike time.time(), which can jump with system clock adjustments.
start_time = time.perf_counter()

# Local path of the pre-downloaded model checkpoint.
model_name = "/workspace/R1-Distill/DeepSeek-R1-14B"

# Instantiate the pretrained causal LM.
# NOTE: torch_dtype="auto" loads the weights in the dtype recorded in the
# checkpoint's config (typically bf16/fp16 for this model) -- it is NOT
# full precision. Use torch_dtype=torch.float32 to force fp32, or
# torch_dtype=torch.float16 for explicit half precision.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",        # place/shard layers across available devices
    low_cpu_mem_usage=True,   # stream weights to avoid a full in-RAM copy
)

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Build the chat messages: one system message plus the user prompt.
prompt = "你好，好久不见，请介绍下你自己！"

messages = [
    {"role": "system", "content": "玉皇大帝是谁。"},
    {"role": "user", "content": prompt},
]

# Render the conversation with the model's chat template, then tokenize.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# Generate the reply (generate() runs in inference mode internally).
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=512,
)
# Strip the prompt tokens so only the newly generated tokens remain.
generated_ids = [
    output_ids[len(input_ids):]
    for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

print(response)

end_time = time.perf_counter()
runtime = end_time - start_time  # elapsed seconds: model load + generation

print("代码运行时间为：", runtime, "秒")