# -*- coding: utf-8 -*-
"""
文件名: build_med_pub_tests.py

功能：
  - 从整理好的中间件 (interim_dir/cls_annotations.json, interim_dir/split.json) 读取“测试集名单”
  - 不做任何增强，按题型分别构造多模态 Alpaca 格式数据
  - 每个题型单独保存：
        med_pub_test_judge.json
        med_pub_test_compare.json
        med_pub_test_diagnosis.json
        med_pub_test_quality.json
        med_pub_test_quality_plus_diagnosis.json
        med_pub_test_choice.json
  - 更新 alpaca_save_dir/dataset_info.json

说明：
  - 依赖你的 PromptConstructor（已含 choice 题逻辑）
  - Fail-fast：任意异常立刻抛出
"""

import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import json
import random
import argparse
from collections import Counter, defaultdict
from loguru import logger
from tqdm import tqdm

from src.utils.prompt_construct import PromptConstructor  # 复用你的构造器

Normal_key = "正常眼底"


# ---------------- 基础IO ----------------

def read_json(path: str):
    """Load and return the JSON content of *path* (UTF-8)."""
    with open(path, "r", encoding="utf-8") as fp:
        return json.load(fp)

def save_json(data, path: str):
    """Write *data* to *path* as pretty-printed UTF-8 JSON, creating parent dirs.

    Fix: ``os.makedirs(os.path.dirname(path))`` raises ``FileNotFoundError``
    when *path* has no directory component (dirname is ""), even with
    ``exist_ok=True`` — only create the parent directory when one exists.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        # ensure_ascii=False keeps Chinese text human-readable in the output file
        json.dump(data, f, ensure_ascii=False, indent=2)
    logger.info(f"已保存: {path}")

def setup_logger(log_dir: str):
    """Attach a rotating loguru file sink under *log_dir* for this script."""
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, "build_med_pub_tests.log")
    # enqueue=True makes the sink safe across processes/threads (tqdm workers)
    logger.add(log_path, rotation="10 MB", encoding="utf-8", enqueue=True)
    logger.info(f"日志写入: {log_path}")

def merge_dataset_info(alpaca_dir, info_dict):
    """Merge *info_dict* into ``alpaca_dir/dataset_info.json``.

    Creates the file if it does not exist; keys present in *info_dict*
    overwrite existing entries, all other entries are preserved.

    Consistency fix: reuse the module's ``read_json`` helper instead of
    re-implementing the JSON-loading boilerplate inline.
    """
    info_path = os.path.join(alpaca_dir, "dataset_info.json")
    all_info = read_json(info_path) if os.path.exists(info_path) else {}
    all_info.update(info_dict)
    save_json(all_info, info_path)
    logger.success("dataset_info.json 已更新。")


# ---------------- 主过程 ----------------

def build_med_pub_tests(
    interim_dir: str,
    alpaca_save_dir: str,
    diease_relation_map: str = "./configs/diease_relation.json",
    seed: int = 42,
    log_dir: str = "./experiments/log",
    enable_types = ("quality", "judge", "compare", "diagnosis", "quality+diagnosis", "choice")
) -> dict:
    """
    Build the multimodal *test-set* questions from the interim files and save
    one Alpaca-format JSON file per question type (no augmentation applied).

    Args:
        interim_dir: directory holding the interim files; must contain:
            - cls_annotations.json (gather_data output: image_path / label(List[str]) / quality(str or None))
            - split.json           (gather_data output: { "train": [...], "test": [...] })
        alpaca_save_dir: output directory for the per-type Alpaca JSON files.
        diease_relation_map: disease-relation JSON (black/white-list constraints
            used by PromptConstructor when sampling negative examples).
        seed: RNG seed shared by the module-level shuffle and PromptConstructor.
        log_dir: directory that receives the loguru log file.
        enable_types: question types allowed to be built; defaults to all
            supported types (must be keys of the filename map below).

    Returns:
        dict with per-type counts, the grand total, and the output file names.

    Raises:
        FileNotFoundError: if an interim file or any test image is missing
            (the test set is strict / fail-fast).
        ValueError: if split.json has an empty "test" list.
    """
    setup_logger(log_dir)
    random.seed(seed)

    ann_path = os.path.join(interim_dir, "cls_annotations.json")
    split_path = os.path.join(interim_dir, "split.json")

    if not os.path.exists(ann_path) or not os.path.exists(split_path):
        raise FileNotFoundError(f"找不到中间件：{ann_path} 或 {split_path}")

    annotations = read_json(ann_path)
    split_data = read_json(split_path)
    test_keys = split_data.get("test", [])
    if not isinstance(test_keys, list) or len(test_keys) == 0:
        raise ValueError("split.json 中 test 列表为空，无法生成测试集。")

    # Aggregate the full set of Chinese disease names from the annotations so
    # PromptConstructor sees the same disease universe as during training
    # (order-preserving de-duplication via the membership check).
    all_cn_diseases = []
    for k, v in annotations.items():
        lbl = v.get("label")
        if isinstance(lbl, list):
            for d in lbl:
                if d not in all_cn_diseases:
                    all_cn_diseases.append(d)

    # Prompt constructor (includes the multiple-choice question logic).
    pc = PromptConstructor(all_cn_diseases, seed=seed, diease_relation_map=diease_relation_map)

    # One sample list per question type.
    # Alpaca schema: {"instruction": "<image>...", "input": "", "output": "...", "images": [image_path]}
    buckets = {t: [] for t in enable_types}
    type_counter = Counter()

    logger.info(f"开始生成测试集题目，共 {len(test_keys)} 张图；启用题型: {list(enable_types)}")

    for img_name in tqdm(test_keys, desc="构造测试题目"):
        item = annotations.get(img_name)
        if not item:
            continue
        label = item.get("label")        # List[str] or None
        quality = item.get("quality")    # str or None
        image_path = item.get("image_path")
        if not image_path or not os.path.exists(image_path):
            # Test set is strict: a missing image is a hard error (fail-fast).
            raise FileNotFoundError(f"[测试图像缺失] {img_name} -> {image_path}")

        # Skip entries that carry neither label nor quality information.
        if not label and not quality:
            continue

        qa_dict = pc.construct_prompts(true_diag=label, quality_text=quality, build_choice=True)

        # Collect only the enabled question types.
        for q_type, qa_list in qa_dict.items():
            if q_type not in enable_types:
                continue
            for q, a in qa_list:
                buckets[q_type].append({
                    "q_type":q_type,
                    "instruction": "<image>" + q,
                    "input": "",
                    "output": a,
                    "images": [image_path]
                })
                type_counter[q_type] += 1

    # Shuffle each type's samples and save one file per type.
    os.makedirs(alpaca_save_dir, exist_ok=True)
    filename_map = {
        "quality": "med_pub_test_quality.json",
        "judge": "med_pub_test_judge.json",
        "compare": "med_pub_test_compare.json",
        "diagnosis": "med_pub_test_diagnosis.json",
        "quality+diagnosis": "med_pub_test_quality_plus_diagnosis.json",
        "choice": "med_pub_test_choice.json",
    }

    for t in enable_types:
        random.shuffle(buckets[t])
        out_path = os.path.join(alpaca_save_dir, filename_map[t])
        save_json(buckets[t], out_path)

    # Statistics: absolute counts and percentages per question type.
    total = sum(type_counter.values())
    logger.info("各题型数量（绝对数 & 占比）:")
    for t, c in type_counter.items():
        ratio = (c / total * 100) if total > 0 else 0.0
        logger.info(f"  - {t:<20}: {c:>6}  ({ratio:5.2f}%)")
    logger.success(f"测试集构造完成，总计 {total} 条。")

    # Register the new files in dataset_info.json ('+' is not filename-safe,
    # so it is mapped to '_' in the dataset key).
    ds_info = {}
    for t in enable_types:
        ds_info_key = f"med_pub_test_{t.replace('+', '_')}"
        ds_info[ds_info_key] = {
            "file_name": filename_map[t],
            "columns": {
                "prompt": "instruction",
                "query": "input",
                "response": "output",
                "images": "images"
            }
        }
    merge_dataset_info(alpaca_save_dir, ds_info)

    return {
        "counts": dict(type_counter),
        "total": total,
        "files": {t: filename_map[t] for t in enable_types}
    }


# ---------------- CLI ----------------

def parse_args():
    """Build the CLI parser for the test-set builder and parse sys.argv."""
    parser = argparse.ArgumentParser(description="从中间件构造 med_pub 测试集（按题型分别输出）。")
    parser.add_argument(
        "--interim_dir",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/dataset_support/",
        help="包含 cls_annotations.json 与 split.json 的目录",
    )
    parser.add_argument(
        "--alpaca_save_dir",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data_test/",
        help="测试集 Alpaca 文件输出目录",
    )
    parser.add_argument(
        "--diease_relation_map",
        type=str,
        default="./configs/diease_relation.json",
        help="疾病关系映射(含黑白名单)",
    )
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--log_dir", type=str, default="./experiments/log")
    parser.add_argument(
        "--types",
        type=str,
        default="quality,judge,compare,diagnosis,quality+diagnosis,choice",
        help="逗号分隔的题型集合",
    )
    return parser.parse_args()


if __name__ == "__main__":
    cli_args = parse_args()
    # Comma-separated --types string -> tuple, dropping empty/whitespace entries.
    selected_types = tuple(t.strip() for t in cli_args.types.split(",") if t.strip())
    build_med_pub_tests(
        interim_dir=cli_args.interim_dir,
        alpaca_save_dir=cli_args.alpaca_save_dir,
        diease_relation_map=cli_args.diease_relation_map,
        seed=cli_args.seed,
        log_dir=cli_args.log_dir,
        enable_types=selected_types,
    )
