"""
test_base_model.py
------------------
微调前测试 base 模型回答
每行都有注释，直接改值即可
"""

from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# base 模型路径
BASE_MODEL = r"D:\models\qwen\qwen\Qwen3-0___6B"

# 想测试的问题列表
QUESTIONS = [
    "你是谁？",
    "你能做什么？",
    "给我讲个笑话",
    "你叫什么名字？"
]

# Load the tokenizer (slow tokenizer for maximum compatibility)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, use_fast=False)

# Load the base model
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,  # half precision to cut GPU memory use
    device_map="auto"  # automatically place weights on available GPU(s)
)
model.eval()  # evaluation mode (disables dropout etc.)

# Generate an answer for every test question
for q in QUESTIONS:
    # Prompt format must match the one used during fine-tuning
    prompt = f"用户：{q}\n小易哥："
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():  # inference only — no gradients needed
        out = model.generate(**inputs, max_new_tokens=100)

    # BUG FIX: model.generate returns prompt tokens + continuation, so
    # decoding out[0] as-is echoes the prompt back inside the answer.
    # Slice off the prompt tokens and decode only the new ones.
    prompt_len = inputs["input_ids"].shape[1]
    answer = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True)
    print(f"问题：{q}")
    print(f"回答：{answer}\n")