# -*- coding: utf-8 -*-
# time: 2025/4/9 14:48
# file: data_ready.py
# author: hanson

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the fine-tuned checkpoint (tokenizer + causal-LM weights) from the
# local output directory produced by training.
model_path = "./qwen2-0.5b-doctor"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,       # checkpoint may ship custom modeling code
    torch_dtype=torch.bfloat16,   # half-precision weights to cut memory use
    device_map="auto",            # let accelerate place layers on available devices
)

# Test cases: instruction-style prompts matching the format used during
# fine-tuning ("Instruction: ...\nInput: ...\nOutput:").
prompts = [
    "Instruction: 孩童中耳炎耳朵流黄水怎样诊治\nInput: 女宝宝3岁，刚上幼儿园，较近几天，孩子的耳朵有点疼，另外，耳屎很多很黄，并且，好像没什么食欲也很乏力，请问：孩童中耳炎耳朵流黄水怎样诊治。\nOutput:"
    # Alternative test case (out-of-domain poetry prompt), kept for manual use:
   # "Instruction: 写一首七言诗\nInput: 主题是春天\nOutput:"
]

# Generate a completion for each test prompt and print it.
for prompt in prompts:
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        # Qwen tokenizers define no pad token by default; without this,
        # generate() emits a warning and picks a pad id on its own.
        pad_token_id=tokenizer.eos_token_id,
    )
    # generate() returns prompt + completion tokens; slice off the prompt so
    # the text after "输出:" is only the model's answer (the prompt is already
    # printed under "输入:").
    prompt_len = inputs["input_ids"].shape[1]
    completion = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    print(f"输入:\n{prompt}\n输出:\n{completion}\n{'='*50}")
