"""
RL 阶段数据集生成脚本（带数据分布统计，适配 cleaned_dataset 为 dict 格式）
=====================================================================

【功能说明】
本脚本将清洗后的 med_pub 数据集处理为 RL 阶段训练可用的 Parquet 格式数据，
并生成详细的数据分布统计文件，便于训练前检查样本平衡性。

主要功能：
1. 读取清洗后的标注数据（cleaned_json，当前为 dict[image_path] -> cleaned_label 的新格式）；
2. 将 dict 转为旧格式 list[{"image_path", "cleaned_label"}] 以复用既有逻辑；
3. 仅保留位于 RL 划分列表（rl_split_list）中的样本；
4. 按类别分层进行 train/test 划分，保证类别平衡；
5. 将样本批量映射为 RL 训练所需格式（去掉 tqdm，支持一次性批量处理）；
6. 将 train/test 分别保存为 Parquet 文件；
7. 输出数据统计信息到 JSON 文件。

【输入文件】
- cleaned_json: 清洗后的 med_pub 数据（dict[str -> str | List[str]]）
  例：
      {
        "/abs/path/img1.png": "正常眼底",
        "/abs/path/img2.png": ["病灶1", "病灶2"],
        ...
      }

- rl_split_list: RL 阶段使用的图片路径列表（JSON，list[{"image_path": "..."}] 或 list[str] 均支持）

【输出文件】
- med_pub_rl_train.parquet
- med_pub_rl_test.parquet
- med_pub_rl_data_info.json  ← 数据分布统计

作者: zym1105
时间: 2025-08-08
"""

import argparse
import json
import os
import random
from collections import Counter, defaultdict
from typing import Any, Dict, List, Tuple

import pandas as pd
from loguru import logger

# ===== Global constants =====
# Relative path to the CoT prompt template that is prepended to every sample's
# user message; assumes the script runs from a sibling of the repo root —
# TODO confirm the expected working directory.
MED_PROMPT_PATH = '../fundus-reasoner-adaptive/fundus_reasoner/data_preprocess/configs/aier_cot_prompt.txt'
# Fixed prefix of the rule-based reward ground truth
# ("In summary, this image is diagnosed as ...").
SYSTEM_ANSWER_PREFIX = "综上，本图片诊断为"


# ===== 工具函数 =====
def parse_args():
    """Parse command-line arguments for the RL dataset generation script.

    Returns:
        argparse.Namespace with fields: cleaned_json, rl_split_list,
        test_ratio, output_dir, save_prefix_name.
    """
    parser = argparse.ArgumentParser(description="生成 RL 阶段训练数据集（Parquet 格式 + 数据统计）")
    parser.add_argument("--cleaned_json", type=str,
                        default="../fundus-reasoner-adaptive/experiments/dataset/med_pub/cleaned_img_label.json",
                        help="清洗后的标注 JSON 路径（dict[image_path] -> cleaned_label）")
    parser.add_argument("--rl_split_list", type=str,
                        default="/home/zhangpinglu/data0/gy/code/fundus-reasoner-adaptive/experiments/dataset/med_pub/med_pub_rl.json",
                        help="RL 阶段划分列表 JSON 路径（list[str] 或 list[{'image_path': str}]）")
    parser.add_argument("--test_ratio", type=float, default=0.1,
                        # '%%' is required: argparse expands help text with
                        # %-formatting, so a bare '%' makes --help raise
                        # "ValueError: unsupported format character".
                        help="测试集比例，例如 0.1 表示 10%% 数据用于测试")
    parser.add_argument("--output_dir", type=str, default="./experiments/dataset/RL_parquet/",
                        help="处理后数据的输出目录")
    parser.add_argument("--save_prefix_name", type=str, default="med_pub_rl",
                        help="输出文件前缀名")
    return parser.parse_args()


def load_json(path: str):
    """Load and return the parsed contents of a UTF-8 encoded JSON file."""
    with open(path, encoding='utf-8') as fp:
        payload = json.load(fp)
    return payload


def construct_answer(cleaned_label: Any) -> str:
    """Build the ground-truth answer string from a cleaned label.

    A string label is returned unchanged; a list of labels is joined with
    ", ". Any other type raises ValueError.
    """
    if isinstance(cleaned_label, str):
        return cleaned_label
    if isinstance(cleaned_label, list):
        return ", ".join(str(part) for part in cleaned_label)
    raise ValueError(f"unexpected data type {type(cleaned_label)} for {cleaned_label}")


def to_legacy_list(cleaned_dataset_map: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Convert dict[image_path -> cleaned_label] into the legacy
    list[{'image_path', 'cleaned_label'}] representation."""
    return [
        {"image_path": path, "cleaned_label": label}
        for path, label in cleaned_dataset_map.items()
    ]


def stratified_split(
    data_list: List[Dict[str, Any]],
    test_ratio: float = 0.1,
    seed: int = 42
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Split samples into train/test sets, stratified by class label.

    Each sample is a dict with 'image_path' and 'cleaned_label'. Multi-label
    samples are stratified on their first label (kept consistent with the
    original pipeline). Every class contributes at least one test sample.
    """
    random.seed(seed)

    # Group samples by their (first) label, preserving encounter order so the
    # RNG consumption — and hence the split — is deterministic for a given seed.
    grouped = defaultdict(list)
    for sample in data_list:
        raw = sample["cleaned_label"]
        key = raw[0] if isinstance(raw, list) and raw else raw
        grouped[key].append(sample)

    train_split: List[Dict[str, Any]] = []
    test_split: List[Dict[str, Any]] = []
    for bucket in grouped.values():
        random.shuffle(bucket)
        # At least one test sample per class; int() truncation matches the
        # original formula (for a singleton bucket this also evaluates to 1).
        take = max(1, int(len(bucket) * test_ratio))
        test_split += bucket[:take]
        train_split += bucket[take:]
    return train_split, test_split


def make_batch_mapper(split: str, cleaned_dataset_map: Dict[str, Any], med_prompt_str: str):
    """Build a batch mapping function for one split.

    The returned callable consumes a list of image paths and produces RL
    training records in a single pass, avoiding per-item tqdm/dispatch
    overhead. Paths missing from ``cleaned_dataset_map`` are logged and
    skipped.
    """
    def process_batch(img_paths: List[str]) -> List[Dict[str, Any]]:
        batch: List[Dict[str, Any]] = []
        for position, path in enumerate(img_paths):
            label = cleaned_dataset_map.get(path)
            if label is None:
                logger.warning(f"Image path {path} not found in cleaned_dataset. Skipping.")
                continue
            record = {
                "data_source": "fundus_qa",
                "prompt": [
                    {
                        "role": "user",
                        "content": "<image>" + med_prompt_str,
                    }
                ],
                "images": [{"image": path}],
                "ability": "diagnosis",
                "reward_model": {
                    "style": "rule",
                    "ground_truth": SYSTEM_ANSWER_PREFIX + construct_answer(label),
                },
                "extra_info": {
                    "split": split,
                    # index is the position within this batch, not a global id
                    "index": position,
                },
            }
            batch.append(record)
        return batch

    return process_batch


def save_parquet(data_list: List[Dict[str, Any]], output_path: str):
    """Persist a list of record dicts to a Parquet file (no index column)."""
    frame = pd.DataFrame(data_list)
    frame.to_parquet(output_path, index=False)
    logger.success(f"{len(data_list)} 条数据已保存到 {output_path}")

def save_data_info(
    train_data: List[Dict[str, Any]],
    test_data: List[Dict[str, Any]],
    output_dir: str,
    save_prefix_name: str,
    test_ratio: float
):
    """Write label-distribution statistics for train/test splits to JSON.

    Counts are taken over each sample's first label (multi-label samples
    contribute only their first label), mirroring the stratification key.
    """
    def _first_label(item: Dict[str, Any]):
        # Multi-label samples are represented by their first label.
        lab = item["cleaned_label"]
        return lab[0] if isinstance(lab, list) and lab else lab

    train_counts = dict(Counter(_first_label(it) for it in train_data))
    test_counts = dict(Counter(_first_label(it) for it in test_data))

    normal_key = "正常眼底"
    n_train_normal = train_counts.get(normal_key, 0)
    n_test_normal = test_counts.get(normal_key, 0)

    info = {
        "total": {
            "train": len(train_data),
            "test": len(test_data),
            "all": len(train_data) + len(test_data),
        },
        "label_distribution": {
            "train": train_counts,
            "test": test_counts,
        },
        "normal_vs_abnormal": {
            "train": {"normal": n_train_normal, "abnormal": len(train_data) - n_train_normal},
            "test": {"normal": n_test_normal, "abnormal": len(test_data) - n_test_normal},
        },
        "settings": {
            "test_ratio": test_ratio,
        },
    }

    save_path = os.path.join(output_dir, f"{save_prefix_name}_data_info.json")
    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(info, f, ensure_ascii=False, indent=2)
    logger.success(f"数据统计信息已保存到 {save_path}")


# ===== 主流程 =====
def main():
    """Entry point: build the RL train/test Parquet datasets plus statistics.

    Pipeline: load cleaned labels and the RL split list, intersect them,
    stratify-split into train/test, dump distribution stats, map samples
    into RL records, and write the Parquet outputs.
    """
    args = parse_args()
    os.makedirs(args.output_dir, exist_ok=True)

    # Logging: console sink + rotating file sink.
    logger.remove()
    logger.add(lambda msg: print(msg, end=""), level="INFO")
    logger.add(os.path.join(args.output_dir, "preprocess.log"), rotation="1 MB", encoding="utf-8", level="INFO")

    # 1) Load the prompt template and the cleaned labels.
    #    Context manager ensures the file handle is released (the previous
    #    bare open().read() leaked the descriptor).
    with open(MED_PROMPT_PATH, 'r', encoding='utf-8') as f:
        med_prompt_str = f.read()
    cleaned_dataset_all = load_json(args.cleaned_json)  # dict[image_path] -> cleaned_label

    # 2) Load the RL split list (accepts list[str] or list[{'image_path': str}]).
    rl_list_raw = load_json(args.rl_split_list)
    if isinstance(rl_list_raw, list) and rl_list_raw and isinstance(rl_list_raw[0], dict):
        rl_image_list = [x["image_path"] for x in rl_list_raw if "image_path" in x]
    else:
        rl_image_list = rl_list_raw  # assumed to already be list[str]
    rl_image_list = [os.path.abspath(p) for p in rl_image_list]
    logger.info(f"尝试加载清洗后的用于 RL 的图片列表，共 {len(rl_image_list)} 条")

    # 3) Keep only samples present in both the RL list and cleaned_dataset.
    #    Normalize cleaned_dataset keys to absolute paths to avoid
    #    path-style mismatches.
    cleaned_map_abs = {os.path.abspath(k): v for k, v in cleaned_dataset_all.items()}
    kept = {}
    miss = 0
    for img_path in rl_image_list:
        val = cleaned_map_abs.get(img_path)
        if val is None:
            logger.warning(f"RL 列表中的样本未在 cleaned_dataset 中找到：{img_path}")
            miss += 1
            continue
        kept[img_path] = val
    logger.info(f"与 cleaned_dataset 命中的 RL 样本：{len(kept)}（缺失 {miss}）")
    # Explicit raise instead of assert: asserts are stripped under
    # `python -O`, which would silently let an empty dataset through.
    if not kept:
        raise ValueError("清洗后的数据集在 RL 列表中为空，请检查输入。")

    # 4) Convert to the legacy list form for statistics and stratification.
    legacy_list = to_legacy_list(kept)

    # 5) Stratified train/test split.
    train_data, test_data = stratified_split(legacy_list, test_ratio=args.test_ratio)
    logger.info(f"分层划分完成：train={len(train_data)}, test={len(test_data)}")

    # 6) Persist distribution statistics.
    save_data_info(train_data, test_data, args.output_dir, args.save_prefix_name, args.test_ratio)

    # 7) Build batch mappers and process train/test separately.
    batch_mapper_train = make_batch_mapper(split="train", cleaned_dataset_map=kept, med_prompt_str=med_prompt_str)
    batch_mapper_test = make_batch_mapper(split="test", cleaned_dataset_map=kept, med_prompt_str=med_prompt_str)

    # The batch mappers only need the image-path lists.
    train_paths = [x["image_path"] for x in train_data]
    test_paths = [x["image_path"] for x in test_data]

    processed_train = batch_mapper_train(train_paths)
    processed_test = batch_mapper_test(test_paths)

    # 8) Write Parquet outputs.
    save_parquet(processed_train, os.path.join(args.output_dir, f"{args.save_prefix_name}_train.parquet"))
    save_parquet(processed_test, os.path.join(args.output_dir, f"{args.save_prefix_name}_test.parquet"))

    logger.success("全部处理完成！")


if __name__ == "__main__":
    main()
