# 导入Hugging Face Transformers相关库
from transformers import AutoModelForCausalLM, AutoTokenizer

# Checkpoint to load. Tokenizer and model must come from the same checkpoint
# so vocab and weights match.
# NOTE(review): the original assigned the Hub id "Qwen/Qwen1.5-1.8B-Chat" and
# then immediately overwrote it with this local path — the dead assignment
# was removed; only the local path was ever used.
MODEL = r"D:\models\qwen3-0.6b"

# Load the fine-tuned tokenizer and causal-LM weights; device_map='auto'
# places the model on the available GPU(s)/CPU automatically.
tokenizer = AutoTokenizer.from_pretrained(MODEL, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL, trust_remote_code=True, device_map='auto')
# Inference only: disables dropout and other training-mode behavior.
model.eval()
# 定义测试示例
# Test cases: every example pairs the same TCM-prescription instruction with
# one patient complaint.
_SHARED_INSTRUCTION = "使用中医知识正确回答适合这个病例的中成药。"
_PATIENT_INPUTS = [
    "我这段时间感觉身体不太对劲，有腹泻的迹象，面黄肌瘦，吃点什么中成药能改善？",
    "我昨天开始咳嗽，感觉喉咙痛，痰又稠又黄，还感觉有点发热。",
]
examples = [
    {"instruction": _SHARED_INSTRUCTION, "input": text}
    for text in _PATIENT_INPUTS
]
# 测试模型生成结果
# Run each test example through the model and print the generated answer.
for example in examples:
    # Prompt template; must match the format used during fine-tuning.
    context = f"Instruction: {example['instruction']}\nInput: {example['input']}\nAnswer: "
    # Tokenize and move ALL tensors (input_ids AND attention_mask) to the
    # model's device. The original passed only input_ids to generate(),
    # which drops the attention mask — HF warns about this and generation
    # is ill-defined when the pad token equals the eos token.
    inputs = tokenizer(context, return_tensors="pt").to(model.device)
    # Greedy-style generation; max_length counts the prompt tokens as well.
    outputs = model.generate(
        **inputs,
        max_length=512,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
    )
    # Decode only the newly generated tokens so "Output" does not repeat
    # the prompt text.
    new_tokens = outputs[0][inputs.input_ids.shape[1]:]
    answer = tokenizer.decode(new_tokens, skip_special_tokens=True)
    # Fixed labels: the originals were "Input\n: " / "Output\n: ", putting
    # the newline on the wrong side of the colon.
    print(f"Input:\n{example['input']}")
    print(f"Output:\n{answer}\n")