# -*- coding: utf-8 -*-
"""
文件: build_alpaca_multimodal.py
作者: zym1105（改进：按题型分文件/按ID划分RL/独立保存目录/统计分布）
日期: 2025-10-10

功能总览：
  1) 读取清洗后的 aier 数据（annotations.json + split.json），产出统一注解；
  2) 按“病人ID（image_name 下划线前）”从训练集中划分 RL 留出集（--rl_num 为 ID 数量）；
  3) 可选对 SFT 训练集做坐标级增强（增强样本合并回训练集）；
  4) 生成 Alpaca 数据：训练集与测试集**分别**按题型分桶并分文件落盘；
  5) train/test 各自维护独立的 dataset_info.json；
  6) 全流程必要处发生异常**立即 raise**，并附带关键调试信息（文件路径、图像名、数量等）。

约定：
  - 题型（q_type）来源于具体 QA 生成器（如 BoxQAGenerator / build_box_qa），本脚本对 q_type **不做硬编码**，
    而是“见啥存啥”，统一以 `aier_coor_qa_{subset}_{q_type}.json` 命名；
  - subset 仅包含 "train_sft" 与 "test" 两类（RL 留出集不产出 Alpaca 数据）。

使用示例：
  python build_alpaca_multimodal.py \
      --data_root /path/to/aier_processed \
      --interim_dir ./experiments/interim/aier_coor_qa \
      --alpaca_train_dir ./experiments/dataset/Alpaca_data/train \
      --alpaca_test_dir  ./experiments/dataset/Alpaca_data/test \
      --enable_enhance \
      --enhanced_save_dir ./experiments/dataset_enhanced/coor_qa \
      --enhance_strategy random \
      --enhance_per_k 2 \
      --enhance_seed 42 \
      --enhance_force_rebuild \
      --rl_num 800 \
      --log_dir ./experiments/log
"""

import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import json
import random
from typing import Dict, List, Tuple, Optional, Any

from tqdm import tqdm
from collections import Counter, defaultdict
from loguru import logger

from src.utils.seg2dect import seg2dect
from src.utils.loc_enhance import CoordImageEnhancer
from src.utils.coor_qa_prompt_construct import build_box_qa, BoxQAGenerator
from src.utils.prompt_construct import PromptConstructor

# ------------------------ 基础工具 ------------------------ #
def merge_dataset_info(root: str, info: Dict[str, Any]):
    """Merge *info* into the dataset_info.json stored directly under *root*.

    Existing entries are kept; keys present in *info* overwrite same-named
    entries. The file is created if it does not exist yet.
    """
    info_path = os.path.join(root, "dataset_info.json")
    merged: Dict[str, Any] = {}
    if os.path.exists(info_path):
        merged = read_json(info_path)
    merged.update(info)
    save_json(merged, info_path)
    logger.success(f"[dataset_info] 已更新: {info_path}")

def read_json(p: str) -> Any:
    """Load and return the JSON content of file *p* (UTF-8 encoded)."""
    with open(p, encoding="utf-8") as fh:
        return json.load(fh)

def save_json(obj: Any, p: str):
    """Serialize *obj* to *p* as pretty-printed UTF-8 JSON.

    Parent directories are created on demand; the write is logged.
    """
    os.makedirs(os.path.dirname(p), exist_ok=True)
    fh = open(p, "w", encoding="utf-8")
    try:
        json.dump(obj, fh, ensure_ascii=False, indent=2)
    finally:
        fh.close()
    logger.info(f"[保存] {p}")

def setup_logger(log_dir: str):
    """Attach a rotating file sink under *log_dir* to the global loguru logger."""
    os.makedirs(log_dir, exist_ok=True)
    sink_path = os.path.join(log_dir, "coor_qa_pipeline.log")
    # enqueue=True makes the sink safe for multi-process logging
    logger.add(sink_path, rotation="10 MB", encoding="utf-8", enqueue=True)
    logger.info(f"[日志] {sink_path}")


# ------------------------ 小工具：按病人 ID 采样 RL 留出集 ------------------------ #
def _extract_patient_id(image_name: str) -> str:
    """
    提取病人 ID：取 image_name 第一个下划线 '_' 之前的部分。
    若无 '_'，则整体作为 ID。
    """
    if not image_name:
        raise ValueError("提取病人ID失败：image_name 为空")
    pos = image_name.find("_")
    return image_name if pos <= 0 else image_name[:pos]

def make_rl_holdout(split: Dict[str, List[str]], rl_num: int = 800, seed: int = 42) -> Dict[str, List[str]]:
    """
    Sample *rl_num* patient IDs out of split['train'] as the RL holdout set,
    guaranteeing that a patient's images never straddle both subsets.
    Note: rl_num counts *IDs*, not images.

    Returns:
      {"train_sft": [...], "train_rl": [...], "test": [...]}
    """
    rng = random.Random(seed)

    # group training images by patient ID (ID = prefix before first '_')
    by_patient: Dict[str, List[str]] = defaultdict(list)
    for name in list(split.get("train", [])):
        by_patient[_extract_patient_id(name)].append(name)

    patient_ids = list(by_patient)
    rng.shuffle(patient_ids)

    # clamp the requested ID count into the valid range [0, #ids]
    n_hold = min(max(rl_num, 0), len(patient_ids))
    holdout_ids = set(patient_ids[:n_hold])

    rl_imgs: List[str] = []
    sft_imgs: List[str] = []
    for pid, names in by_patient.items():
        bucket = rl_imgs if pid in holdout_ids else sft_imgs
        bucket.extend(names)

    return {"train_sft": sft_imgs, "train_rl": rl_imgs, "test": list(split.get("test", []))}


# ------------------------ gather_data（新增：落盘 simple_annotations） ------------------------ #
def gather_data(
    data_root: str,
    global_split_path: str,
    save_dir: str
) -> Tuple[Dict[str, Any], Dict[str, List[str]], str]:
    """
    Collect annotations from the aier dataset and persist a unified
    simple_annotations.json under *save_dir*.

    Args:
      data_root: directory containing annotations.json.
      global_split_path: path to the global split.json ({"train": [...], "test": [...]}).
      save_dir: output directory for aier_simple_annotations.json.

    Returns:
      (annotations dict, split dict, path of the saved simple annotations)

    Raises:
      FileNotFoundError: split file, annotation file, or a referenced image is missing.
      ValueError: an annotated image does not belong to the global split.
    """
    logger.info("开始收集坐标类标注数据 ...")
    os.makedirs(save_dir, exist_ok=True)

    # load the global train/test split
    if not os.path.exists(global_split_path):
        raise FileNotFoundError(f"找不到全局划分文件: {global_split_path}")
    split = read_json(global_split_path)

    anno_path = os.path.join(data_root, "annotations.json")
    if not os.path.exists(anno_path):
        raise FileNotFoundError(f"找不到标注文件: {anno_path}")
    data = read_json(anno_path)

    # PERF FIX: membership tests against the raw lists were O(n) per image
    # (O(n^2) over the dataset); build lookup sets once instead.
    train_names = set(split.get("train", []))
    test_names = set(split.get("test", []))

    annotations: Dict[str, Any] = {}
    for image_name, item in data.items():
        diag = item.get("diagnosis", {})
        dec = diag.get("detection", {})
        record = {"image_name": image_name, "image_path": item.get("image_path")}
        # fail fast on missing images so downstream QA never sees dead paths
        if not record["image_path"] or not os.path.exists(record["image_path"]):
            raise FileNotFoundError(f"[图像缺失] {image_name} -> {record['image_path']}")
        if dec:
            record["detection"] = dec

        # optional classification / quality fields
        cls_cn = diag.get("classification", {}).get("text", None)
        if cls_cn:
            # normalize a single label string to a one-element list
            record["label"] = [cls_cn] if isinstance(cls_cn, str) else cls_cn
        quality = item.get("quality") or diag.get("quality")
        if quality:
            record["quality"] = quality

        # every annotated image must be assigned to exactly one split
        if image_name not in train_names and image_name not in test_names:
            raise ValueError(f"图像 {image_name} 不在 global split 中，请检查 split.json")

        annotations[image_name] = record

    # persist simple annotations for the later augmentation step
    simple_path = os.path.join(save_dir, "aier_simple_annotations.json")
    save_json(annotations, simple_path)
    logger.success(f"收集完成：共 {len(annotations)} 条；simple_annotations => {simple_path}")
    return annotations, split, simple_path


# ------------------------ 构建增强 ------------------------ #
def build_enhanced_coor_qa(
    base_annotations_path: str,
    enhanced_save_dir: str,
    count_strategy: str = "random",
    per_image_k: int = 2,
    op_probs: Optional[Dict[str, float]] = None,
    seed: int = 42,
    force_rebuild: bool = False,
    allow_keys: Optional[set] = None
) -> Dict[str, Any]:
    """
    Generate coordinate-level augmented samples from simple_annotations.json.

    Args:
      base_annotations_path: simple annotations produced by gather_data.
      enhanced_save_dir: output directory for augmented images + annotations.
      count_strategy / per_image_k / op_probs / seed: forwarded to CoordImageEnhancer.
      force_rebuild: wipe *enhanced_save_dir* before generating.
      allow_keys: if given, only augment images whose name is in this set
        (used to restrict augmentation to the SFT training split).

    Returns:
      {"annotations": {new_image_name: record, ...}, "stats": {...}}

    Raises:
      RuntimeError: when augmenting a single image fails; the message carries
      the image name/path and the original exception is chained via ``from e``.
    """
    if force_rebuild and os.path.exists(enhanced_save_dir):
        import shutil
        shutil.rmtree(enhanced_save_dir)
        logger.warning(f"[增强] 已删除旧目录: {enhanced_save_dir}")

    os.makedirs(enhanced_save_dir, exist_ok=True)
    base_annotations = read_json(base_annotations_path)

    enhancer = CoordImageEnhancer(
        save_dir=enhanced_save_dir,
        count_strategy=count_strategy,
        per_image_k=per_image_k,
        # default op distribution: mostly flips, occasional 90-degree rotation
        op_probs=op_probs or {"h": 0.4, "v": 0.4, "vh": 0.15, "rot90": 0.05},
        seed=seed
    )

    enhanced_ann: Dict[str, Any] = {}
    n_imgs, n_new = 0, 0
    for image_name, rec in tqdm(base_annotations.items(), desc="增强样本"):
        if allow_keys and image_name not in allow_keys:
            continue
        det = rec.get("detection")
        if not det:
            # no boxes -> nothing coordinate-level to augment
            continue
        label_list = rec.get("label", [])
        quality = rec.get("quality")
        try:
            new_items = enhancer.enhance_one(
                image_path=rec["image_path"],
                image_name=rec["image_name"],
                detection_dict=det,
                location_dict={},
                source_dataset='aier',
                label=label_list,
                quality=quality
            )
        except Exception as e:
            # FIX: chain the cause (`from e`) so the original traceback survives
            raise RuntimeError(f"[增强失败] {image_name} -> {rec.get('image_path')} | err={e}") from e

        for it in new_items:
            enhanced_ann[it["image_name"]] = it
        n_imgs += 1
        n_new += len(new_items)

    enh_ann_path = os.path.join(enhanced_save_dir, "annotations.json")
    save_json(enhanced_ann, enh_ann_path)
    logger.info(f"增强完成：增强源图 {n_imgs} 张，生成新样本 {n_new} 张。")
    return {"annotations": enhanced_ann, "stats": {"enhanced_from_images": n_imgs, "enhanced_new_images": n_new}}


# ------------------------ 主流程（按题型分文件 & 独立目录） ------------------------ #
def _save_qa_buckets(out_dir: str, buckets: Dict[str, List[Dict[str, Any]]], file_prefix: str) -> None:
    """Shuffle each question-type bucket, save one JSON per type under *out_dir*,
    and register all files in that directory's single dataset_info.json.

    File and dataset names follow f"{file_prefix}_{q_type}" with '+' replaced
    by '_' so the question type never produces a confusing file name.
    """
    os.makedirs(out_dir, exist_ok=True)
    ds_info: Dict[str, Any] = {}
    for q_type, items in buckets.items():
        random.shuffle(items)
        safe_qtype = q_type.replace("+", "_")  # avoid '+' in file names
        fname = f"{file_prefix}_{safe_qtype}.json"
        save_json(items, os.path.join(out_dir, fname))
        ds_info[f"{file_prefix}_{safe_qtype}"] = {
            "file_name": fname,
            # FIX: only the standard Alpaca columns; the original coordinate-QA
            # branch injected a stray "q_type" key into this mapping.
            "columns": {
                "prompt": "instruction",
                "query": "input",
                "response": "output",
                "images": "images"
            }
        }
    merge_dataset_info(out_dir, ds_info)


def _log_type_distribution(counter: Counter, header: str) -> int:
    """Log per-type absolute counts and percentages; return the grand total."""
    total = sum(counter.values())
    logger.info(header)
    for t, c in counter.items():
        ratio = (c / total * 100) if total > 0 else 0.0
        logger.info(f"  - {t:<24}: {c:>6}  ({ratio:5.2f}%)")
    return total


def build_multimodal_dataset(
    data_root: str,
    global_split_path: str,
    interim_dir: str,
    alpaca_train_dir: str,
    alpaca_test_dir: str,
    enable_enhance: bool = True,
    enhanced_save_dir: str = "./experiments/dataset_enhanced/coor_qa",
    enhance_count_strategy: str = "random",
    enhance_per_image_k: int = 2,
    enhance_op_probs: Optional[Dict[str, float]] = None,
    enhance_seed: int = 42,
    enhance_force_rebuild: bool = False,
    rl_num: int = 800,
    log_dir: str = "./experiments/log",
    enable_types: Optional[List[str]] = None
):
    """
    Single-dataset + RL-holdout pipeline (per-question-type files;
    train/test get independent directories and dataset_info.json files).

    Args:
      data_root: root of the cleaned aier dataset (contains annotations.json).
      global_split_path: path to the global train/test split.json.
      interim_dir: directory for intermediate artifacts (simple annotations, splits).
      alpaca_train_dir / alpaca_test_dir: output dirs for Alpaca-format data.
      enable_enhance / enhanced_* / enhance_*: coordinate-level augmentation
        options; augmentation is applied to the SFT training split only.
      rl_num: number of *patient IDs* (not images) held out for RL.
      log_dir: loguru sink directory.
      enable_types: optional whitelist of coordinate-QA question types; when
        given, only those q_types are kept in the coordinate train/test output
        (previously accepted but silently ignored).

    Raises:
      RuntimeError / FileNotFoundError / ValueError: fail fast with the
      offending file/image in the message whenever config loading, QA
      construction, or an image lookup fails.
    """
    setup_logger(log_dir)
    os.makedirs(interim_dir, exist_ok=True)
    os.makedirs(alpaca_train_dir, exist_ok=True)
    os.makedirs(alpaca_test_dir, exist_ok=True)

    # Step 1. Collect data and persist simple_annotations.
    annotations, split, simple_ann_path = gather_data(
        data_root=data_root,
        global_split_path=global_split_path,
        save_dir=interim_dir
    )
    logger.info(f"🔍 收集到有效标注样本: {len(annotations)} 条 | 训练 {len(split['train'])} | 测试 {len(split['test'])}")

    # Step 2. Carve out the RL holdout by patient ID.
    sft_rl_split = make_rl_holdout(split, rl_num=rl_num, seed=enhance_seed)
    save_json(sft_rl_split, os.path.join(interim_dir, "sft_rl_split.json"))
    logger.success(f"[划分] RL 留出图像: {len(sft_rl_split['train_rl'])} | SFT 训练图像: {len(sft_rl_split['train_sft'])}")

    # Step 3. Optional augmentation (train_sft only); augmented samples are
    # merged back into the SFT training split.
    if enable_enhance:
        allow_keys = set(sft_rl_split["train_sft"])
        enh = build_enhanced_coor_qa(
            base_annotations_path=simple_ann_path,
            enhanced_save_dir=enhanced_save_dir,
            count_strategy=enhance_count_strategy,
            per_image_k=enhance_per_image_k,
            op_probs=enhance_op_probs,
            seed=enhance_seed,
            force_rebuild=enhance_force_rebuild,
            allow_keys=allow_keys
        )
        enhanced_ann = enh["annotations"]
        merged_ann = {**annotations, **enhanced_ann}
        merged_split = sft_rl_split
        merged_split["train_sft"].extend(list(enhanced_ann.keys()))
    else:
        merged_ann, merged_split = annotations, sft_rl_split

    save_json(merged_ann, os.path.join(interim_dir, "coor_qa_annotations_merged.json"))
    save_json(merged_split, os.path.join(interim_dir, "split_merged.json"))

    # ------------------------ SimpleQA (classification / quality) ------------------------ #
    # PromptConstructor implements the black/white-list and hard-negative
    # sampling logic. Train items are merged into ONE file (keeping a
    # question_type field for statistics); test items are bucketed per type.
    try:
        with open('./configs/all_cn_diease.json', 'r', encoding='utf-8') as f:
            all_cn_diease = json.load(f)
    except Exception as e:
        raise RuntimeError(f"加载 all_cn_diease 失败: ./configs/all_cn_diease.json | err={e}") from e

    try:
        gen_prompt = PromptConstructor(
            all_cn_diseases=all_cn_diease,
            seed=42,
            diease_relation_map='./configs/diease_relation.json'
        )
    except Exception as e:
        raise RuntimeError(f"初始化 PromptConstructor 失败，请检查配置文件与路径 | err={e}") from e

    simpleQA_train: List[Dict[str, Any]] = []
    qa_type_counter = Counter()

    # SimpleQA is generated only for the SFT training split; the RL holdout is
    # never used for SFT and the test split is handled separately below.
    for img_name in tqdm(merged_split.get("train_sft", []), desc="生成SimpleQA数据-train_sft"):
        item = merged_ann.get(img_name)
        if not item:
            continue
        label = item.get("label")      # List[str] or None
        quality = item.get("quality")  # str or None

        # skip images with neither a diagnosis nor a quality label
        if not label and not quality:
            continue

        try:
            qa_dict = gen_prompt.construct_prompts(true_diag=label, quality_text=quality)
        except Exception as e:
            # fail fast on config/sampling errors, carrying key debug info
            raise RuntimeError(f"[SimpleQA 构造失败] image={img_name} | label={label} | quality={quality} | err={e}") from e

        # flatten every question type into unified QA entries; the train set
        # is stored as a single file with question_type preserved
        for q_type, qa_list in qa_dict.items():
            for question, answer in qa_list:
                qa_type_counter[q_type] += 1
                simpleQA_train.append({
                    "question_type": q_type,
                    "instruction": "<image>" + question,
                    "input": "",
                    "output": answer,
                    "images": [item.get("image_path")]
                })

    random.shuffle(simpleQA_train)

    # quick visual sanity check of a few random samples
    vis_n = min(3, len(simpleQA_train))
    if vis_n > 0:
        samples = random.sample(simpleQA_train, k=vis_n)
        print("=" * 60)
        print(f"🎯 随机抽取 {vis_n} 条训练 SimpleQA 样本可视化：")
        print("=" * 60)
        for i, it in enumerate(samples, 1):
            print(f"\n📘 样本 {i}:")
            print(f"🖼️  图片路径: {it['images'][0]}")
            print(f"🧩  指令: {it['instruction']}")
            print(f"📝  输出: {it['output']}")
            print("-" * 60)

    # ================= question-type distribution (SimpleQA-Train) =================
    total_qa = sum(qa_type_counter.values())
    logger.info(f"[SimpleQA-Train] 条目数: {len(simpleQA_train)}")
    logger.info("各题型生成数量统计（绝对数 & 占比）:")
    for q_type, count in qa_type_counter.items():
        ratio = (count / total_qa) * 100 if total_qa > 0 else 0
        logger.info(f"  - {q_type:<18}: {count:>6}  ({ratio:5.2f}%)")

    # ================= save SimpleQA train (one merged file, same dir as coord QA) =================
    simpleqa_train_file = os.path.join(alpaca_train_dir, "aier_simpleQA_train.json")
    save_json(simpleQA_train, simpleqa_train_file)

    merge_dataset_info(alpaca_train_dir, {
        "aier_simpleQA_train": {
            "file_name": os.path.basename(simpleqa_train_file),
            "columns": {
                "prompt": "instruction",
                "query": "input",
                "response": "output",
                "images": "images"
            }
        }
    })
    logger.success("✅ SimpleQA 训练数据已输出完毕。")

    # ========== SimpleQA test set: one file per question type in alpaca_test_dir ==========
    simpleqa_test_buckets: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
    simpleqa_test_counter = Counter()

    for k in tqdm(merged_split.get("test", []), desc="构造测试题目"):
        # BUGFIX: the original looked up the stale loop variable `img_name`
        # (the last TRAIN image) instead of `k`, so every test QA entry
        # reused a single training image; it also dereferenced a None item
        # when building the error message.
        item = merged_ann.get(k)
        if not item:
            raise FileNotFoundError(f"[测试标注缺失] image={k} 不在合并标注中")
        label = item.get("label")      # List[str] or None
        quality = item.get("quality")  # str or None
        # skip images with neither a diagnosis nor a quality label
        if not label and not quality:
            continue
        try:
            qa_dict = gen_prompt.construct_prompts(true_diag=label, quality_text=quality)
        except Exception as e:
            raise RuntimeError(f"[SimpleQA 构造失败] image={k} | label={label} | quality={quality} | err={e}") from e
        for q_type, qa_list in qa_dict.items():
            for question, answer in qa_list:
                simpleqa_test_counter[q_type] += 1
                simpleqa_test_buckets[q_type].append({
                    "instruction": "<image>" + question,
                    "input": "",
                    "output": answer,
                    "images": [item.get("image_path")]
                })

    # BUGFIX: save under an "aier_simpleQA_test_*" prefix — the original
    # reused "aier_coor_qa_test_*", so the coordinate-QA test files written
    # below silently overwrote the SimpleQA test files.
    _save_qa_buckets(alpaca_test_dir, simpleqa_test_buckets, "aier_simpleQA_test")
    total_test = _log_type_distribution(simpleqa_test_counter, "【测试集】各题型数量（绝对数 & 占比）:")
    logger.success(f"测试集构造完成，总计 {total_test} 条。")

    # Step 4. Coordinate QA (train merged into one file; test per q_type).
    gen = BoxQAGenerator(iou_neg_threshold=0.05, black_ratio_threshold=0.20, neg_trials=5, rng_seed=42)
    # FIX: honor the previously-ignored enable_types whitelist (None = keep all)
    allowed_types = set(enable_types) if enable_types else None

    # ========== coordinate-QA training set ==========
    train_items: List[Dict[str, Any]] = []
    train_counter = Counter()

    for k in tqdm(merged_split.get("train_sft", []), desc="生成QA-train_sft"):
        item = merged_ann.get(k)
        if not item:
            continue
        img_path = item.get("image_path")
        if not img_path or not os.path.exists(img_path):
            # strict: a missing training image means dirty data — fail fast
            raise FileNotFoundError(f"[训练图像缺失] image={k} -> {img_path}")
        det = item.get("detection")
        if not det:
            # no detection boxes -> no coordinate QA for this image
            continue

        for cn_key, boxes in det.items():
            if not boxes:
                continue
            try:
                triples = build_box_qa(gen, label_cn=cn_key, bboxes_norm=boxes, image_path=img_path)
            except Exception as e:
                raise RuntimeError(f"[构造QA失败-训练] image={k} | label={cn_key} | err={e}") from e
            for q, a, qtype in triples:
                if allowed_types and qtype not in allowed_types:
                    continue
                train_counter[qtype] += 1
                train_items.append({
                    "question_type": qtype,
                    "instruction": "<image>" + q,
                    "input": "",
                    "output": a,
                    "images": [img_path]
                })

    random.shuffle(train_items)
    train_file = os.path.join(alpaca_train_dir, "aier_coor_qa_train.json")
    save_json(train_items, train_file)
    _log_type_distribution(train_counter, f"[训练集] 总计 {len(train_items)} 条。按题型统计：")

    merge_dataset_info(alpaca_train_dir, {
        "aier_coor_qa_train": {
            "file_name": os.path.basename(train_file),
            "columns": {
                "prompt": "instruction",
                "query": "input",
                "response": "output",
                "images": "images"
            }
        }
    })

    # ========== coordinate-QA test set: one file per question type ==========
    coor_test_buckets: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
    coor_test_counter = Counter()

    for k in tqdm(merged_split.get("test", []), desc="构造测试题目"):
        item = merged_ann.get(k)
        if not item:
            continue

        img_path = item.get("image_path")
        if not img_path or not os.path.exists(img_path):
            # strict for test: missing image is an error
            raise FileNotFoundError(f"[测试图像缺失] image={k} -> {img_path}")

        # images without detection boxes produce no coordinate QA
        det = item.get("detection")
        if not det:
            continue

        for cn_key, boxes in det.items():
            if not boxes:
                continue
            try:
                triples = build_box_qa(gen, label_cn=cn_key, bboxes_norm=boxes, image_path=img_path)
            except Exception as e:
                # raise immediately with key debug info; never silently continue
                raise RuntimeError(f"[构造QA失败-测试] image={k} | label={cn_key} | err={e}") from e
            for q, a, qtype in triples:
                if allowed_types and qtype not in allowed_types:
                    continue
                coor_test_counter[qtype] += 1
                coor_test_buckets[qtype].append({
                    # BUGFIX: was `"q_type": q_type`, a stale variable from the
                    # SimpleQA loop; tag each entry with THIS triple's type,
                    # using the same key name as the training items.
                    "question_type": qtype,
                    "instruction": "<image>" + q,
                    "input": "",
                    "output": a,
                    "images": [img_path]
                })

    _save_qa_buckets(alpaca_test_dir, coor_test_buckets, "aier_coor_qa_test")
    total_test = _log_type_distribution(coor_test_counter, "【测试集】各题型数量（绝对数 & 占比）:")
    logger.success(f"测试集构造完成，总计 {total_test} 条。")




# ------------------------ CLI ------------------------ #
def parse_args(argv: Optional[List[str]] = None):
    """
    Parse command-line options for the coordinate-QA pipeline.

    Args:
      argv: optional explicit argument list; defaults to sys.argv[1:].
        Passing a list makes the parser testable without touching the
        process arguments (backward compatible: existing callers pass nothing).

    Returns:
      argparse.Namespace with all pipeline options.
    """
    import argparse
    ap = argparse.ArgumentParser(description="坐标类 QA 构建流水线（按ID划分RL + 题型分文件 + train/test独立目录）")
    ap.add_argument("--data_root", type=str, default="/home/zhangpinglu/data0/gy/Dataset/aier_processed")
    ap.add_argument("--interim_dir", type=str, default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/aier_support")

    # train and test each get their own output directory
    ap.add_argument("--alpaca_train_dir", type=str, default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data")
    ap.add_argument("--alpaca_test_dir",  type=str, default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data_test")

    ap.add_argument("--enable_enhance", action="store_true")
    ap.add_argument("--enhanced_save_dir", type=str, default="./experiments/dataset_enhanced/coor_qa")
    ap.add_argument("--enhance_strategy", type=str, default="random")
    ap.add_argument("--enhance_per_k", type=int, default=2)
    ap.add_argument("--enhance_seed", type=int, default=42)
    ap.add_argument("--enhance_force_rebuild", action="store_true")

    # NOTE: rl_num counts patient IDs, not images
    ap.add_argument("--rl_num", type=int, default=800)

    ap.add_argument("--log_dir", type=str, default="./experiments/log")

    # optional comma-separated whitelist of question types, e.g. "coor_judge,coor_compare"
    ap.add_argument("--enable_types", type=str, default="")
    return ap.parse_args(argv)


if __name__ == "__main__":
    args = parse_args()

    # Turn the comma-separated CLI string into a clean list; an empty
    # result becomes None, meaning "keep all question types".
    requested_types = [part.strip() for part in args.enable_types.split(",") if part.strip()]
    enable_types = requested_types if requested_types else None

    build_multimodal_dataset(
        data_root=args.data_root,
        global_split_path=os.path.join(args.data_root, 'split.json'),
        interim_dir=args.interim_dir,
        alpaca_train_dir=args.alpaca_train_dir,
        alpaca_test_dir=args.alpaca_test_dir,
        enable_enhance=args.enable_enhance,
        enhanced_save_dir=args.enhanced_save_dir,
        enhance_count_strategy=args.enhance_strategy,
        enhance_per_image_k=args.enhance_per_k,
        enhance_op_probs=None,  # use the enhancer's built-in default distribution
        enhance_seed=args.enhance_seed,
        enhance_force_rebuild=args.enhance_force_rebuild,
        rl_num=args.rl_num,  # interpreted as a count of patient IDs
        log_dir=args.log_dir,
        enable_types=enable_types
    )
