'''
Author: SUNNY
Description: Interactive command-line multi-turn chat with a local DeepSeek-R1-Distill model.
Date: 2025-11-24 16:16:01
'''
from modelscope import AutoModelForCausalLM, AutoTokenizer
import torch

# Path to the locally downloaded model checkpoint.
model_name = "/workspace/R1-Distill/DeepSeek-R1-14B"

# Instantiate the pretrained causal LM, auto-sharded across available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision
    # torch_dtype="auto",  # full precision
    device_map="auto",
    low_cpu_mem_usage=True,
)

# Tokenizer matching the same checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Conversation history, seeded with the system prompt.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    # {"role": "user", "content": prompt}
]

while True:
    new_question = input("请输入问题：")
    # Typing "clear" resets the history to just the system prompt.
    if new_question == "clear":
        messages = [messages[0]]
        continue

    messages.append({"role": "user", "content": new_question})
    # Render the running conversation through the model's chat template,
    # with the generation prompt appended so the model answers next.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )

    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    # Generate the reply (capped at 512 new tokens).
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
    )

    # Strip the prompt tokens so only the newly generated tokens remain.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

    print(response)

    # BUG FIX: the model's reply must be recorded with role "assistant",
    # not "system" — appending it as "system" mislabels the history for
    # the chat template and degrades every subsequent turn.
    messages.append({"role": "assistant", "content": response})