"""Single-turn chat inference with a local Qwen-style causal LM on an Ascend NPU.

Loads a tokenizer and model from a local checkpoint path, builds a chat prompt
via the tokenizer's chat template, samples a response, and prints it.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import torch_npu  # registers the Ascend NPU backend with torch
from torch_npu.contrib import transfer_to_npu  # noqa: F401 - side effect: routes CUDA calls to NPU

# Run the NPU backend in eager mode (no JIT compilation).
torch.npu.set_compile_mode(jit_compile=False)

# Model selection (local checkpoint path).
# model_name = "/media/nvme1n1_dist/Qwen2.5-0.5B-Instruct"
model_name = "/media/nvme1n1_dist/temp/llm_related-main/s1_from_scratch/s1"

# Load tokenizer and model.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",  # automatically place the model on GPU / NPU / CPU
    torch_dtype=torch.float16  # use torch.float32 if float16 is unsupported
)

# Chat history (multi-turn supported).
messages = [
    {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
    {"role": "user", "content": "求下列乘積除以 1000 時的餘數：9 × 99 × 999 × ⋯ × (999 個 9)"}
]

# Build model inputs. return_dict=True also yields an attention_mask, which
# generate() needs to mask padding correctly and to avoid a runtime warning.
inputs = tokenizer.apply_chat_template(
    messages,
    return_tensors="pt",
    add_generation_prompt=True,
    return_dict=True
).to(model.device)
input_ids = inputs["input_ids"]

# Generate a reply. no_grad() skips building an autograd graph during inference.
with torch.no_grad():
    outputs = model.generate(
        input_ids=input_ids,
        attention_mask=inputs["attention_mask"],
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id  # silences the missing-pad-token warning
    )

# Decode only the newly generated tokens (slice off the prompt).
response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
print("🤖 Assistant:", response)