from transformers import AutoTokenizer
from transformers.models.qwen2 import Qwen2ForCausalLM
import torch

# Local checkpoint of the Qwen2.5-7B model fine-tuned on traditional
# Chinese medicine data.
model_path = "/home/featurize/work/models/qwen2.5_7B_traditional_chinese_medicine"


def load_model_and_tokenizer(path: str = model_path):
    """Load the tokenizer and fp16 Qwen2 model from a local checkpoint.

    Args:
        path: Filesystem path of the checkpoint directory.

    Returns:
        A ``(tokenizer, model)`` pair; the model is sharded across available
        devices via ``device_map="auto"`` and switched to eval mode.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        path,
        trust_remote_code=True,
        local_files_only=True,  # never hit the Hub; the checkpoint is local
    )
    model = Qwen2ForCausalLM.from_pretrained(
        path,
        trust_remote_code=True,
        torch_dtype=torch.float16,  # half precision to fit the 7B weights
        device_map="auto",
        local_files_only=True,
    ).eval()
    return tokenizer, model


def chat(
    tokenizer,
    model,
    prompt: str,
    *,
    system_prompt: str = "你是一个有帮助的助手",
    max_new_tokens: int = 512,
    temperature: float = 0.7,
    top_p: float = 0.9,
) -> str:
    """Generate a single-turn reply using the Qwen2.5 chat template.

    Args:
        tokenizer: Tokenizer returned by :func:`load_model_and_tokenizer`.
        model: Model returned by :func:`load_model_and_tokenizer`.
        prompt: User message.
        system_prompt: System message prepended to the conversation.
        max_new_tokens: Upper bound on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Returns:
        The decoded model reply, with the prompt tokens and special tokens
        stripped.
    """
    # Build the conversation and render it with the model's chat template.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,  # append the assistant turn opener
    )

    # Tokenize and move the batch onto the model's device.
    inputs = tokenizer([text], return_tensors="pt").to(model.device)

    outputs = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        pad_token_id=tokenizer.eos_token_id,  # silence the missing-pad warning
        eos_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # Slice off the prompt tokens so only the new continuation is decoded.
    return tokenizer.decode(
        outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True
    )


def main() -> None:
    """Load the model and print a reply to the sample TCM question."""
    tokenizer, model = load_model_and_tokenizer()
    response = chat(tokenizer, model, "请解析一下中医所说的气")
    print(response)


# Guard so importing this module does not load a 7B model or run generation.
if __name__ == "__main__":
    main()