"""
evaluate.py
-----------
批量评估脚本：把模型在一批 QA 上的输出和期望输出对比，并保存到文件
每行都有中文注释，直接修改变量即可
"""

import json
import os

import torch
from datasets import load_dataset
from peft import PeftModel
from tqdm import tqdm  # 进度条显示
from transformers import AutoTokenizer, AutoModelForCausalLM

# ---------------- Configuration -----------------
BASE_MODEL = r"D:\models\qwen\qwen\Qwen3-0___6B"  # Path to the base model weights
ADAPTER = r"./qwen-finetune-output/checkpoint-6"  # Path to the LoRA adapter checkpoint
EVAL_FILE = r"./data/eval.jsonl"  # Evaluation data (JSONL, one example per line)
OUTPUT_FILE = r"./eval-output/eval_result.jsonl"  # Where evaluation results are written
MAX_NEW_TOKENS = 100  # Maximum number of tokens the model may generate per example
# -----------------------------------------

# Load the tokenizer. use_fast=False forces the slow (Python) tokenizer;
# presumably chosen for compatibility with this checkpoint — TODO confirm.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, use_fast=False)

# Load the base causal-LM model.
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,  # half precision to reduce memory usage
    device_map="auto"  # let accelerate place layers on available GPU(s)/CPU
)

# Wrap the base model with the fine-tuned LoRA adapter weights.
model = PeftModel.from_pretrained(model, ADAPTER)

# Switch to evaluation mode (disables dropout etc.).
model.eval()

# Load the evaluation dataset; each line is a JSON object of the form
# {"instruction": "...", "output": "..."} ("output" may be missing).
ds = load_dataset("json", data_files=EVAL_FILE, split="train")

results = []

# Iterate over every evaluation example, generating a model answer for each.
for ex in tqdm(ds):
    # The question / instruction text.
    q = ex["instruction"]
    # Expected answer (some rows may lack "output"; default to empty string).
    expected = ex.get("output", "")
    # Build the prompt in the same chat format used during fine-tuning.
    prompt = f"用户：{q}\nAI："
    # Tokenize and move the input tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Disable gradient tracking for inference.
    with torch.no_grad():
        out = model.generate(
            **inputs,
            max_new_tokens=MAX_NEW_TOKENS,
            do_sample=False  # greedy decoding -> deterministic answers
        )
    # BUG FIX: generate() returns the prompt tokens followed by the new
    # tokens. Previously the full sequence was decoded, so "generated"
    # contained the prompt text as well. Decode only the newly generated
    # portion by skipping the prompt-length prefix.
    prompt_len = inputs["input_ids"].shape[1]
    gen = tokenizer.decode(out[0][prompt_len:], skip_special_tokens=True)

    # Accumulate the result for this example.
    results.append({
        "instruction": q,
        "expected": expected,
        "generated": gen
    })

# BUG FIX: open(..., "w") raises FileNotFoundError if the output directory
# does not exist yet — create it first (no-op when it already exists).
os.makedirs(os.path.dirname(OUTPUT_FILE) or ".", exist_ok=True)

# Write the results as JSONL: one JSON object per line, UTF-8, with
# ensure_ascii=False so Chinese text is stored readably rather than escaped.
with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
    for r in results:
        f.write(json.dumps(r, ensure_ascii=False) + "\n")

print("评估完成，结果已保存到", OUTPUT_FILE)
