import os
import re
import json
import argparse
from pathlib import Path
from tqdm import tqdm
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, cohen_kappa_score
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

# --------- Helper functions ---------
def parse_filename(fname):
    """Split an epoch filename of the form '<index>_<stage>.<ext>' into
    (index, stage); return None when the name does not match the pattern."""
    matched = re.match(r"^(\d+)_([^.]+)\.[A-Za-z0-9]+$", fname)
    if matched is None:
        return None
    index_text, stage = matched.groups()
    try:
        return int(index_text), stage
    except ValueError:
        # \d+ always converts cleanly, but keep the original defensive guard.
        return None

def is_image_file(path):
    """Return True if *path* carries a known raster-image extension (case-insensitive)."""
    allowed = {".png", ".jpg", ".jpeg", ".webp", ".gif", ".bmp"}
    _, extension = os.path.splitext(path)
    return extension.lower() in allowed

# AASM sleep-stage label -> class index (W = wake, N1-N3 = NREM, R = REM).
Stage2Idx = {"W": 0, "N1": 1, "N2": 2, "N3": 3, "R": 4}
# Inverse map: class index -> stage label.
Idx2Stage = {v: k for k, v in Stage2Idx.items()}

# --------- Model-output parsing ---------
def _extract_json_candidate(text: str):
    """Return the most plausible JSON substring of *text*, or None.

    Preference order:
    1. The first ``` / ```json fenced code block.
    2. The first balanced {...} span that contains the key "sleep_stage".
       Brace counting keeps nested objects intact — the previous non-greedy
       regex ``\\{[\\s\\S]*?\\}`` stopped at the first '}' and truncated any
       JSON with a nested object.
    """
    fence_match = re.search(r"```(?:json)?\s*([\s\S]*?)```", text, re.IGNORECASE)
    if fence_match:
        return fence_match.group(1).strip()
    for start, ch in enumerate(text):
        if ch != "{":
            continue
        depth = 0
        for end in range(start, len(text)):
            if text[end] == "{":
                depth += 1
            elif text[end] == "}":
                depth -= 1
                if depth == 0:
                    candidate = text[start:end + 1]
                    if "sleep_stage" in candidate:
                        return candidate
                    break  # balanced but wrong block; try the next '{'
    return None

def parse_model_output(out_text: str):
    """Parse a model reply into (sleep_stage, reasoning_text, applicable_rules, error_msg).

    The fine-tuned model is expected to answer like the training data, i.e. a
    fenced ```json block (or bare JSON) of the form:
    {"sleep_stage": "N2", "reasoning_text": "...", "applicable_rules": "N2.1"}

    Robustness measures:
    1. Prefer the first fenced code block; otherwise locate the first balanced
       {...} span containing "sleep_stage" (handles surrounding prose).
    2. Strip a leading BOM and stray backticks.
    3. Normalize full-width quotes and tolerate trailing commas.
    On any failure returns (None, None, None, error_message).
    """
    # Drop a UTF-8 BOM if present (previously documented but never done).
    text = out_text.lstrip("\ufeff").strip()
    try:
        raw_json_str = _extract_json_candidate(text)
        if raw_json_str is None:
            raise ValueError("未找到 JSON 结构")

        # Remove stray leading/trailing backticks left over from the fence.
        raw_json_str = raw_json_str.strip().strip('`').strip()
        # Normalize full-width quotes occasionally emitted by the model.
        raw_json_str = raw_json_str.replace('“', '"').replace('”', '"').replace('’', "'")

        try:
            data = json.loads(raw_json_str)
        except json.JSONDecodeError:
            # Tolerate a trailing comma before ANY closing brace/bracket
            # (the old code only fixed ",}" at end-of-string).
            data = json.loads(re.sub(r",\s*([}\]])", r"\1", raw_json_str))

        sleep_stage = data.get("sleep_stage")
        reasoning_text = data.get("reasoning_text")
        applicable_rules = data.get("applicable_rules")

        # Normalize the stage label and validate against the known classes.
        if isinstance(sleep_stage, str):
            sleep_stage = sleep_stage.strip().upper()
        if sleep_stage not in Stage2Idx:
            raise ValueError(f"sleep_stage 非法: {sleep_stage}")

        # Normalize the free-text fields.
        if isinstance(reasoning_text, str):
            reasoning_text = reasoning_text.strip()
        if isinstance(applicable_rules, str):
            applicable_rules = applicable_rules.strip()

        return sleep_stage, reasoning_text, applicable_rules, None
    except Exception as e:
        return None, None, None, str(e)

# --------- Main pipeline ---------
def main():
    """Evaluate a fine-tuned Qwen2.5-VL checkpoint on 5-class sleep staging.

    Pipeline: parse CLI args -> collect one sample per epoch image for every
    subject in the chosen split -> batched chat-template generation -> parse
    each reply's JSON into a stage prediction -> dump raw results to JSONL
    and per-subject / overall metrics (accuracy, macro-F1, Cohen's kappa,
    confusion matrix) to an xlsx workbook.
    """
    parser = argparse.ArgumentParser(description="Qwen2.5-VL-3B-Instruct 测试集五分类评估")
    parser.add_argument("--ckpt_dir", type=str, default=None)
    parser.add_argument("--system_prompt", type=str, default=None)
    parser.add_argument("--subjects_json", type=str, default=None)
    parser.add_argument("--subjects_split", type=str, default=None)
    parser.add_argument("--img_dir", type=str, default=None)
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--output_jsonl", type=str, default=None)
    parser.add_argument("--output_xlsx", type=str, default=None)
    args = parser.parse_args()

    # NOTE(review): every path argument defaults to None yet is used
    # unconditionally below (basename/replace/open) — omitting one crashes
    # with AttributeError/TypeError; consider marking them required=True.
    ckpt_dir = args.ckpt_dir        
    system_prompt_file = args.system_prompt
    subjects_json = args.subjects_json
    subjects_split = args.subjects_split
    img_dir = args.img_dir
    batch_size = args.batch_size
    output_jsonl = args.output_jsonl
    output_xlsx = args.output_xlsx

    # Tag output files with the checkpoint folder name so runs on different
    # checkpoints don't overwrite each other's results.
    ckpt_tag = os.path.basename(ckpt_dir)
    output_jsonl = output_jsonl.replace(".jsonl", f"_{ckpt_tag}.jsonl")
    output_xlsx = output_xlsx.replace(".xlsx", f"_{ckpt_tag}.xlsx")

    # print args
    print("--ckpt_dir:", ckpt_dir)
    print("--system_prompt_path:", system_prompt_file)
    print("--subjects_json:", subjects_json)
    print("--subjects_split:", subjects_split)
    print("--img_dir:", img_dir)
    print("--batch_size:", batch_size)
    print("--output_jsonl:", output_jsonl)
    print("--output_xlsx:", output_xlsx)

    # NOTE(review): os.path.dirname() returns "" for a bare filename and
    # os.makedirs("") raises FileNotFoundError — only an issue when an
    # output path has no directory component.
    os.makedirs(os.path.dirname(output_jsonl), exist_ok=True)
    os.makedirs(os.path.dirname(output_xlsx), exist_ok=True)

    # Read the system prompt used for every request.
    with open(system_prompt_file, "r", encoding="utf-8") as f:
        system_prompt = f.read()

    # Read the subject-ID split definitions.
    with open(subjects_json, "r", encoding="utf-8") as f:
        data = json.load(f)

    # Substring matching on the split name; 'test' is checked first, so a
    # name containing "test" wins even if it also contains "val"/"train".
    if 'test' in subjects_split:
        subjects = sorted(set(data["test_subjects"]))
    elif 'val' in subjects_split:
        subjects = sorted(set(data["val_subjects"]))
    elif 'train' in subjects_split:
        subjects = sorted(set(data["train_subjects"]))
    else:
        raise ValueError(f"未知的 subjects_split: {subjects_split}")

    # Collect all evaluation samples: one per epoch image, sorted by epoch
    # index within each subject. Ground truth is encoded in the filename
    # ("<idx>_<stage>.<ext>", see parse_filename).
    samples = []
    for sub_id in tqdm(subjects, desc="Collect samples"):
        sub_dir = os.path.join(img_dir, sub_id)
        if not os.path.isdir(sub_dir):
            continue  # missing subject folder is silently skipped
        files = [f for f in os.listdir(sub_dir) if is_image_file(f)]
        epochs = []
        for f in files:
            parsed = parse_filename(f)
            if not parsed:
                continue  # filename doesn't match the expected pattern
            idx, stage = parsed
            epochs.append({
                "idx": idx,
                "stage": stage,
                "path": os.path.join(sub_id, f),
                "sub_id": sub_id
            })
        epochs.sort(key=lambda x: x["idx"])
        for ep in epochs:
            custom_id = f"{sub_id}#{ep['idx']}_{ep['stage']}"
            samples.append({
                "custom_id": custom_id,
                "sub_id": sub_id,
                "image_path": os.path.join(img_dir, ep["path"]),
                "stage": ep["stage"],
                "label": Stage2Idx.get(ep["stage"], -1)  # -1 = unknown stage text
            })
    if not samples:
        print("No test samples found!")
        return

    # Load model and processor from the checkpoint directory.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        ckpt_dir,
        dtype="bfloat16",
        # attn_implementation="flash_attention_2",
        device_map="auto"
    )
    processor = AutoProcessor.from_pretrained(
        ckpt_dir, 
        use_fast=True
    )
    # model.eval()
    # NOTE(review): model.eval() is commented out — any dropout modules stay
    # in train mode; confirm this is intended.

    # Batched inference.
    results = []
    for i in tqdm(range(0, len(samples), batch_size), desc="Inference"):
    # for i in tqdm(range(0, 10, batch_size), desc="Inference"):
        batch = samples[i:i+batch_size]
        texts = []
        image_inputs = []
        for s in batch:
            # One chat per sample: shared system prompt + user text + image.
            messages = [
                {"role": "system", "content": system_prompt},
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "下面请分析这张PSG epoch波形图："},
                        {"type": "image", "image": s["image_path"]},
                    ],
                },
            ]
            # print(messages)
            # print(s["custom_id"], s["stage"])
            text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
            texts.append(text)
            img_in, _ = process_vision_info(messages)
            image_inputs.append(img_in)
        inputs = processor(
            text=texts,
            images=image_inputs,
            padding=True,
            return_tensors="pt",
        )
        # NOTE(review): hard-coded "cuda"; model.device would also cover CPU
        # or sharded device_map placements.
        inputs = inputs.to("cuda")
        # with torch.no_grad():
        # NOTE(review): top_p/temperature only take effect when sampling is
        # enabled (do_sample) — verify the checkpoint's generation_config.
        generated_ids = model.generate(
            **inputs, 
            max_new_tokens=512, 
            top_p=0.8, 
            temperature=0.7
        )
        # Drop the prompt tokens so only the generated continuation is decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        output_texts = processor.batch_decode(
            generated_ids_trimmed, 
            skip_special_tokens=True, 
            clean_up_tokenization_spaces=False
        )
        for s, out_text in zip(batch, output_texts):
            sleep_stage, reasoning_text, applicable_rules, parse_error = parse_model_output(out_text)
            if sleep_stage is not None:
                pred = Stage2Idx.get(sleep_stage, -1)
            else:
                pred = -1
            results.append({
                "custom_id": s["custom_id"],
                "sub_id": s["sub_id"],
                "image_path": s["image_path"],
                "stage": s["stage"],  # ground-truth stage label (text)
                "label": s["label"],  # ground-truth class index
                "output": out_text,    # raw model output
                "pred": pred,          # predicted class index (-1 = parse failure)
                "pred_stage": sleep_stage,  # parsed stage label (may be None)
                "reasoning_text": reasoning_text,
                "applicable_rules": applicable_rules,
                "parse_error": parse_error,
            })

    # Ensure output directories exist (duplicates the makedirs calls above).
    Path(output_jsonl).parent.mkdir(parents=True, exist_ok=True)
    Path(output_xlsx).parent.mkdir(parents=True, exist_ok=True)
    # Persist one raw result per line.
    with open(output_jsonl, "w", encoding="utf-8") as fout:
        for item in results:
            fout.write(json.dumps(item, ensure_ascii=False) + "\n")

    # Metrics. Rows whose prediction could not be parsed (pred == -1) are
    # excluded from the "valid" statistics but counted as invalid outputs.
    df = pd.DataFrame(results)
    valid_mask = df["pred"].isin(list(Idx2Stage.keys()))
    df_valid = df[valid_mask]
    df_invalid = df[~valid_mask]

    # Per-subject metrics (NaN when a subject has no parseable prediction).
    subject_metrics = []
    for sub_id, group in df.groupby("sub_id"):
        y_true = group["label"].values
        y_pred = group["pred"].values
        valid = np.isin(y_pred, list(Idx2Stage.keys()))
        acc = accuracy_score(y_true[valid], y_pred[valid]) if valid.any() else np.nan
        macro_f1 = f1_score(y_true[valid], y_pred[valid], average="macro") if valid.any() else np.nan
        kappa = cohen_kappa_score(y_true[valid], y_pred[valid]) if valid.any() else np.nan
        per_class_f1 = f1_score(y_true[valid], y_pred[valid], average=None, labels=list(Idx2Stage.keys())) if valid.any() else [np.nan]*5
        conf_mat = confusion_matrix(y_true[valid], y_pred[valid], labels=list(Idx2Stage.keys())) if valid.any() else np.full((5,5), np.nan)
        subject_metrics.append({
            "subject": sub_id,
            "n_samples": len(group),
            "n_valid": valid.sum(),
            "n_invalid": (~valid).sum(),
            "accuracy": acc,
            "macro_f1": macro_f1,
            "kappa": kappa,
            **{f"f1_{Idx2Stage[i]}": per_class_f1[i] for i in range(5)},
            "conf_mat": conf_mat.tolist()
        })

    # Overall metrics across all valid predictions.
    y_true_all = df_valid["label"].values
    y_pred_all = df_valid["pred"].values
    acc_all = accuracy_score(y_true_all, y_pred_all) if len(y_true_all) else np.nan
    macro_f1_all = f1_score(y_true_all, y_pred_all, average="macro") if len(y_true_all) else np.nan
    kappa_all = cohen_kappa_score(y_true_all, y_pred_all) if len(y_true_all) else np.nan
    per_class_f1_all = f1_score(y_true_all, y_pred_all, average=None, labels=list(Idx2Stage.keys())) if len(y_true_all) else [np.nan]*5
    conf_mat_all = confusion_matrix(y_true_all, y_pred_all, labels=list(Idx2Stage.keys())) if len(y_true_all) else np.full((5,5), np.nan)

    # Write the xlsx report (three sheets).
    with pd.ExcelWriter(output_xlsx) as writer:
        df_subj = pd.DataFrame(subject_metrics)
        df_subj.drop(columns=["conf_mat"], inplace=True)  # matrix doesn't fit a flat sheet
        df_subj.to_excel(writer, sheet_name="subject", index=False)
        # sheet2: overall
        df_overall = pd.DataFrame({
            "accuracy": [acc_all],
            "macro_f1": [macro_f1_all],
            "kappa": [kappa_all],
            **{f"f1_{Idx2Stage[i]}": [per_class_f1_all[i]] for i in range(5)}
        })
        df_overall.to_excel(writer, sheet_name="overall", index=False)
        # sheet3: confusion matrix
        df_conf = pd.DataFrame(conf_mat_all, columns=[f"pred_{Idx2Stage[i]}" for i in range(5)], index=[f"true_{Idx2Stage[i]}" for i in range(5)])
        df_conf.to_excel(writer, sheet_name="confusion_matrix")

    print(f"测试集有效样本数: {len(df_valid)}, 异常输出数: {len(df_invalid)}")
    print(f"整体准确率: {acc_all:.4f}, Macro-F1: {macro_f1_all:.4f}, Kappa: {kappa_all:.4f}")
    print(f"混淆矩阵:\n{conf_mat_all}")

if __name__ == "__main__":
    # Script entry point.
    main()
