import os
import json
import random
import argparse


def _dump_json(path, obj):
    """Write *obj* to *path* as pretty-printed, UTF-8, non-ASCII-escaped JSON."""
    with open(path, "w", encoding="utf-8") as f:
        json.dump(obj, f, ensure_ascii=False, indent=2)


def extract_monitor_subset(src_dir, tar_dir, monitor_number=500, seed=42):
    """Extract a monitor subset from an Alpaca-format multi-benchmark folder.

    For every ``*_test*.json`` benchmark listed in ``src_dir/dataset_info.json``
    this produces, inside ``tar_dir``:
      1. a per-benchmark subset of at most ``monitor_number`` samples;
      2. one aggregate file of all sampled benchmarks
         (``monitor_total_<monitor_number>.json``);
      3. one aggregate of the four core benchmark types
         (compare / judge / diagnosis / choice) as ``monitor_2000.json``;
      4. a new ``dataset_info.json`` describing the generated files.

    Args:
        src_dir: Source directory containing ``dataset_info.json`` and the
            benchmark JSON files it references.
        tar_dir: Target directory for the subsets (created if missing).
        monitor_number: Maximum number of samples kept per benchmark.
        seed: Random seed for reproducible sampling.

    Raises:
        FileNotFoundError: If ``src_dir`` has no ``dataset_info.json``.
    """
    random.seed(seed)
    os.makedirs(tar_dir, exist_ok=True)

    src_info_path = os.path.join(src_dir, "dataset_info.json")
    tar_info_path = os.path.join(tar_dir, "dataset_info.json")

    if not os.path.exists(src_info_path):
        raise FileNotFoundError(f"❌ dataset_info.json not found in {src_dir}")

    with open(src_info_path, "r", encoding="utf-8") as f:
        dataset_info = json.load(f)

    new_info = {}
    all_samples = []
    key_subset_samples = []  # only compare/judge/diagnosis/choice samples

    # -------------------------------
    # Iterate every benchmark, sample it, and write the subset out.
    # -------------------------------
    for name, meta in dataset_info.items():
        file_name = meta.get("file_name")
        # Guard against a missing/None file_name (would crash on .endswith),
        # and keep only the "*_test*.json" benchmark files.
        if not file_name or not file_name.endswith(".json") or "_test" not in file_name:
            continue

        src_path = os.path.join(src_dir, file_name)
        if not os.path.exists(src_path):
            print(f"⚠️  File not found, skip: {src_path}")
            continue

        with open(src_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        if not isinstance(data, list):
            print(f"⚠️  Unexpected format (not list): {file_name}")
            continue

        n = len(data)
        # Keep everything when the benchmark is already small enough.
        subset = data if n <= monitor_number else random.sample(data, monitor_number)

        # Write the per-benchmark subset file (before tagging, so the
        # subset file itself carries no extra bookkeeping field).
        prefix, ext = os.path.splitext(file_name)
        new_name = f"{prefix}_{monitor_number}{ext}"
        _dump_json(os.path.join(tar_dir, new_name), subset)

        # Register the subset in the new dataset_info.
        new_key = f"{name}_{monitor_number}"
        new_meta = meta.copy()
        new_meta["file_name"] = new_name
        new_info[new_key] = new_meta

        # Tag each sample with its benchmark of origin for the aggregates.
        for item in subset:
            item["_benchmark_name"] = name
        all_samples.extend(subset)

        # Classify the whole file by its first sample's q_type; str()/or ""
        # guards against q_type being None or non-string.
        if len(subset) > 0 and isinstance(subset[0], dict):
            q_type = str(subset[0].get("q_type") or "").lower()
            if q_type in ["compare", "judge", "diagnosis", "choice"]:
                key_subset_samples.extend(subset)

        print(f"✅ {file_name}: sampled {len(subset)}/{n} → {new_name}")

    # Shared "columns" schema for the aggregate entries (taken from the first
    # benchmark written, empty when no benchmark matched).
    shared_columns = next(iter(new_info.values()))["columns"] if new_info else {}

    # ======================================================
    # Aggregate of all sampled benchmarks.
    # ======================================================
    total_name = f"monitor_total_{monitor_number}.json"
    _dump_json(os.path.join(tar_dir, total_name), all_samples)

    new_info["monitor_total"] = {
        "file_name": total_name,
        "columns": shared_columns,
        "description": f"汇总所有基准的监控子集，每个约 {monitor_number} 条，共 {len(all_samples)} 条样本。"
    }

    # ======================================================
    # Aggregate of the four core types (compare/judge/diagnosis/choice).
    # NOTE: the file name is intentionally fixed (4 benchmarks × 500 ≈ 2000).
    # ======================================================
    key_name = "monitor_2000.json"
    _dump_json(os.path.join(tar_dir, key_name), key_subset_samples)

    new_info["monitor_2000"] = {
        "file_name": key_name,
        "columns": shared_columns,
        "description": f"仅包含 compare/judge/diagnosis/choice 四类 benchmark 的监控子集，共 {len(key_subset_samples)} 条样本。"
    }

    # Write the new dataset_info.json describing everything produced above.
    _dump_json(tar_info_path, new_info)

    print("\n🎯 Done! Saved all monitor subsets to:", tar_dir)
    print(f"🧾 Total monitor samples: {len(all_samples)}")
    print(f"🧾 Core-4 monitor samples: {len(key_subset_samples)}")
    print(f"🧾 New dataset_info.json entries: {len(new_info)}")


if __name__ == "__main__":
    # CLI entry point: every option mirrors a parameter of
    # extract_monitor_subset().
    cli = argparse.ArgumentParser(
        description="Extract monitor subset from Alpaca benchmark folder"
    )
    cli.add_argument(
        "--src_dir",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data_test",
        help="Source directory containing benchmarks",
    )
    cli.add_argument(
        "--tar_dir",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data_monitor",
        help="Target directory to save monitor subsets",
    )
    cli.add_argument(
        "--monitor_number",
        type=int,
        default=500,
        help="Number of samples per benchmark",
    )
    cli.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed for reproducibility",
    )

    opts = cli.parse_args()
    extract_monitor_subset(opts.src_dir, opts.tar_dir, opts.monitor_number, opts.seed)
