# -*- coding: utf-8 -*-
"""
对med_pub数据集进行处理，将其转换为Alpaca格式的多模态数据集，
并将“简单问答”和“正常样本思维链”分开保存，train/test各自独立。

作者：zym1105
时间：2025-08-06
"""

import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import json
import random
import argparse
from collections import Counter, defaultdict
from tqdm import tqdm
from loguru import logger
from src.utils.prompt_construct import PromptConstructor
from src.utils.image_enhance import build_enhanced_cls
# Canonical Chinese label for a normal fundus image. Used downstream to split
# train samples into normal vs. abnormal (RL subset extraction) and passed to
# build_enhanced_cls as normal_key so that only abnormal images are augmented.
Normal_key = "正常眼底"


# ===================== 参数与工具 =====================

def parse_args():
    """Define and parse the command-line options for this conversion script."""
    ap = argparse.ArgumentParser(description="med_pub多模态数据转Alpaca格式")

    # input / output locations
    ap.add_argument("--data_root", type=str,
                    default="/home/zhangpinglu/data0/gy/Dataset/public_processed",
                    help="处理后的数据根目录")
    ap.add_argument("--disease_map_path", type=str,
                    default="./configs/diseases_discription.json",
                    help="疾病映射json")
    ap.add_argument("--quality_map_path", type=str,
                    default="./configs/quality_discription.json",
                    help="质量映射json")
    ap.add_argument("--alpaca_save_dir", type=str,
                    default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data/",
                    help="Alpaca格式数据保存目录")
    ap.add_argument("--interim_dir", type=str,
                    default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/dataset_support/",
                    help="其他关键文件保存位置")

    # split / logging / reproducibility
    ap.add_argument("--test_ratio", type=float, default=0.1, help="测试集比例")
    ap.add_argument("--log_dir", type=str, default="./experiments/log", help="日志目录")
    ap.add_argument("--seed", type=int, default=42)

    # image-augmentation options
    ap.add_argument("--enhanced_dir", type=str,
                    default="/home/zhangpinglu/data0/gy/Dataset/public_enhanced/classification",
                    help="增强数据保存目录")
    ap.add_argument("--enhance_strategy", type=str, default="random",
                    choices=["random", "fixed"], help="增强张数策略: random 或 fixed")
    ap.add_argument("--enhance_per_k", type=int, default=2, help="固定策略下每张图增强数量")
    ap.add_argument("--enhance_force_rebuild", action="store_true", help="是否强制重建增强数据集")
    return ap.parse_args()



def setup_logger(log_dir):
    """Attach a rotating file sink under *log_dir* to the global loguru logger."""
    os.makedirs(log_dir, exist_ok=True)
    target = os.path.join(log_dir, "process_med_pub_data2Alpaca.log")
    # enqueue=True makes the sink process-safe; rotate at 10 MB per file
    logger.add(target, rotation="10 MB", encoding="utf-8", enqueue=True)
    logger.info(f"日志文件写入：{target}")


def read_json(path):
    """Load a UTF-8 encoded JSON file and return the parsed object."""
    with open(path, encoding="utf-8") as fp:
        return json.load(fp)


def save_json(data, path):
    """Write *data* to *path* as pretty-printed UTF-8 JSON.

    Parent directories are created on demand. Fix: the original always called
    ``os.makedirs(os.path.dirname(path))``, which raises FileNotFoundError
    when *path* is a bare filename (``dirname`` returns ``""``); we now only
    create directories when there is a directory component.
    """
    parent = os.path.dirname(path)
    if parent:  # makedirs("") would raise FileNotFoundError
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    logger.info(f"已保存：{path}")


def merge_dataset_info(alpaca_dir, info_dict):
    """Merge *info_dict* into ``alpaca_dir/dataset_info.json`` (created if absent).

    Existing entries are kept; keys present in *info_dict* overwrite them.
    """
    info_path = os.path.join(alpaca_dir, "dataset_info.json")
    all_info = {}
    if os.path.exists(info_path):
        with open(info_path, "r", encoding="utf-8") as fp:
            all_info = json.load(fp)
    all_info.update(info_dict)
    save_json(all_info, info_path)
    logger.success("dataset_info.json 已更新。")


# ===================== 数据整合逻辑 =====================
# Sub-datasets excluded from processing.
back_list=[
    "Messidor"# reason: this dataset was later re-annotated by MapleDR; it was processed once at first, but afterwards the MapleDR annotations were adopted uniformly
]
def gather_data(data_root, split_ratio, disease_map_path, quality_map_path, save_dir="./experiments/dataset"):
    """Collect all data, make a train/test split, extract an RL subset, and save interim files.

    Args:
        data_root: Root directory containing one sub-directory per sub-dataset,
            each with an ``annotations.json`` and optionally a ``split.json``.
        split_ratio: Fraction of samples assigned to the test split when a
            sub-dataset has no pre-defined ``split.json``.
        disease_map_path: JSON mapping disease label text (matched
            case-insensitively) to a list of Chinese disease names.
        quality_map_path: JSON mapping quality label text to a Chinese
            quality description.
        save_dir: Directory where the interim files (cls_annotations.json,
            split.json, rl_annotations.json) are written.

    Returns:
        Tuple of (classification_annotations, split_data, rl_annotations,
        all_cn_diease).

    Raises:
        ValueError: If a disease or quality label has no entry in its map.
    """
    os.makedirs(save_dir, exist_ok=True)
    sub_set_list = os.listdir(data_root)
    random.seed(42)  # fixed seed so the shuffle-based split is reproducible

    with open(disease_map_path, "r", encoding="utf-8") as f:
        raw_disease_map = json.load(f)
    disease_map = {}
    for k, v in raw_disease_map.items():
        disease_map[k.lower()] = v  # lower-case the keys; values kept as-is

    with open(quality_map_path, "r", encoding="utf-8") as f:
        quality_map = json.load(f)

    classification_annotations = {}
    split_data = {"train": [], "test": []}
    all_cn_diease=[  "早期年龄相关性黄斑变性","中期年龄相关性黄斑变性","视网膜静脉阻塞",
                        "晚期年龄相关性黄斑变性","视神经病变","进展期新生血管性黄斑变性","黄斑营养不良","进展期地图样萎缩黄斑变性",  "有髓神经纤维"]# these classes are special to the Aier data; seed the list with them up front
    sub_set_list = [s for s in sub_set_list if s not in back_list]
    logger.info(f"处理子集{len(sub_set_list)}个：{sub_set_list}")
    for sub_set in sub_set_list:
        data_cnt=0
        anno_path = os.path.join(data_root, sub_set, "annotations.json")
        if not os.path.exists(anno_path):
            continue

        with open(anno_path, "r", encoding="utf-8") as f:
            data_dict = json.load(f)

        # use split.json when present; otherwise shuffle and split by ratio
        split_json_path = os.path.join(data_root, sub_set, "split.json")
        if os.path.exists(split_json_path):
            split_info = read_json(split_json_path)
            split_data["train"].extend(split_info.get("train", []))
            split_data["test"].extend(split_info.get("test", []))
        else:
            keys = list(data_dict.keys())
            random.shuffle(keys)
            n_test = int(len(keys) * split_ratio)
            split_data["test"].extend(keys[:n_test])
            split_data["train"].extend(keys[n_test:])

        for image_name, data in data_dict.items():
            diag = data.get("diagnosis", {})
            class_data = diag.get("classification", {})

            if not class_data:
                continue  # no classification info (may be pure segmentation/localization)
            data_cnt+=1
            image_info = {"image_path": data["image_path"]}

            # disease mapping (label text -> list of Chinese disease names)
            class_label = None
            if "text" in class_data:
                classification_label_text = class_data["text"] 
                class_cn = disease_map.get(classification_label_text.lower(), None)
                if class_cn is None:
                    raise ValueError(f"{image_name} 在 {sub_set} 具有标注 {classification_label_text}，但未定义对应映射")
                if len(class_cn)<=0:
                    continue  # an empty mapping means the label is deliberately dropped
                class_label=class_cn
                for cn_name in class_cn:
                    if cn_name not in all_cn_diease:
                        all_cn_diease.append(cn_name)


            # quality mapping (quality text -> Chinese quality description)
            quality_label = None
            if "quality" in class_data:
                quality_text = class_data["quality"]
                quality_cn = quality_map.get(quality_text, None)
                if quality_cn is None:
                    raise ValueError(f"{image_name} 的质量标签 '{quality_text}' 未定义映射，请在quality_map.json中添加")
                quality_label = quality_cn

            image_info.update({"label": class_label, "quality": quality_label})
            classification_annotations[image_name] = image_info
        logger.info(f"子集 {sub_set} 处理完成，样本数: {data_cnt}")
    # ===============================
    # RL split logic: draw a subset from train
    # ===============================
    all_train_keys = split_data["train"]
    # a sample counts as "normal" iff its first label equals Normal_key
    normal_imgs = [
    k for k in all_train_keys
    if (labels := classification_annotations.get(k, {}).get("label"))
    and len(labels) > 0
    and labels[0] == Normal_key
    ]

    abnormal_imgs = [
        k for k in all_train_keys
        if (labels := classification_annotations.get(k, {}).get("label"))
        and len(labels) > 0
        and labels[0] != Normal_key
    ]


    # cap the RL subset at 2000 normal + 5000 abnormal samples
    rl_normals = normal_imgs[:2000] if len(normal_imgs) > 2000 else normal_imgs
    rl_abnormals = abnormal_imgs[:5000] if len(abnormal_imgs) > 5000 else abnormal_imgs
    rl_all = rl_normals + rl_abnormals
    rl_annotations = {k: classification_annotations[k] for k in rl_all if k in classification_annotations}

    logger.info(f"Train样本数: {len(all_train_keys)} (正常 {len(normal_imgs)}, 异常 {len(abnormal_imgs)})")
    logger.info(f"RL子集: 共 {len(rl_all)} (正常 {len(rl_normals)}, 异常 {len(rl_abnormals)})")
    logger.info(f"Test样本数: {len(split_data['test'])}")
    logger.info(f"cn name 种类数: {len(all_cn_diease)}")

    # ===============================
    # save interim results
    # ===============================
    save_json(classification_annotations, os.path.join(save_dir, "cls_annotations.json"))
    save_json(split_data, os.path.join(save_dir, "split.json"))
    save_json(rl_annotations, os.path.join(save_dir, "rl_annotations.json"))
    save_json(all_cn_diease, os.path.join('./configs', "all_cn_diease.json"))
    logger.success(f"中间文件已保存至 {save_dir}：cls_annotations.json / split.json / rl_annotations.json")

    return classification_annotations, split_data, rl_annotations,all_cn_diease


# ---------- 小工具：合并增强样本到原始annotations/split ----------

def _merge_enhanced_into_base(
    base_annotations: dict,
    base_split: dict,
    enhanced_annotations: dict,
    put_subset: str = "train"
):
    """Merge augmented samples into the base annotations and split.

    Rules:
      - each augmented file becomes a standalone entry keyed by its new name
      - the entry is trimmed down to image_path / label / quality
      - augmented names are appended only to ``put_subset`` (train);
        the test split is left untouched
    """
    if put_subset not in base_split:
        raise ValueError(f"split 不包含子集: {put_subset}")

    merged_annotations = dict(base_annotations)  # shallow copy of the base map
    merged_split = {subset: list(names) for subset, names in base_split.items()}  # copy one level deep

    added = 0
    for aug_name, record in enhanced_annotations.items():
        # treat the augmented image as a "real" sample with a trimmed entry
        entry = {"image_path": record["aug_image_path"]}
        # carry label/quality over as flat fields when present
        label = record.get("label")
        if label is not None:
            entry["label"] = label
        quality = record.get("quality")
        if quality is not None:
            entry["quality"] = quality
        merged_annotations[aug_name] = entry

        # augmented samples join the training split only
        merged_split[put_subset].append(aug_name)
        added += 1

    logger.info(f"合并增强样本完成：新增 {added} 条 -> 放入 {put_subset}")
    return merged_annotations, merged_split


# ---------- 主流程----------

def build_multimodal_dataset(
        data_root,
        disease_map_path,
        quality_map_path,
        interim_dir,
        alpaca_save_dir,
        test_ratio=0.1,
        log_dir="./experiments/log",
        enhanced_save_dir="./experiments/dataset_enhanced/cls",
        enhance_count_strategy="random",          # "random" or "fixed"
        enhance_per_image_k=2,                    # augmentations per image when strategy is "fixed"
        enhance_op_probs=None,                    # None -> internal default {"h":0.4,"v":0.4,"vh":0.15,"rot90":0.05}
        enhance_seed=42,
        enhance_force_rebuild=False
):
    """Full pipeline: gather -> augment -> merge -> build Alpaca SFT data.

    Steps:
      1) gather_data produces the "raw" annotations/split/rl files
         (saved inside gather_data itself)
      2) build_enhanced_cls augments abnormal samples only and saves them
         to enhanced_save_dir
      3) augmented samples are merged into train; the "merged" annotations
         and split are saved to interim_dir
      4) the merged data is used to generate the Alpaca SFT dataset
    """
    setup_logger(log_dir)
    os.makedirs(interim_dir, exist_ok=True)
    os.makedirs(alpaca_save_dir, exist_ok=True)

    # ===== 1) gather the raw data =====
    annotations, split_data, rl_set, all_cn_diseases = gather_data(
        data_root, split_ratio=test_ratio,
        disease_map_path=disease_map_path,
        quality_map_path=quality_map_path,
        save_dir=interim_dir
    )

    # ===== 2) build the augmented dataset (abnormal samples only) =====
    enh = build_enhanced_cls(
        annotations_path=os.path.join(interim_dir, "cls_annotations.json"),
        enhanced_save_dir=enhanced_save_dir,
        normal_key=Normal_key,
        count_strategy=enhance_count_strategy,
        per_image_k=enhance_per_image_k,
        op_probs=(enhance_op_probs or {"h": 0.4, "v": 0.4, "vh": 0.15, "rot90": 0.05}),
        seed=enhance_seed,
        force_rebuild=enhance_force_rebuild
    )
    enhanced_annotations = enh["annotations"]
    logger.info(f"增强完成: {enh['stats']}")

    # ===== 3) merge augmented samples into the train split =====
    merged_annotations, merged_split = _merge_enhanced_into_base(
        base_annotations=annotations,
        base_split=split_data,
        enhanced_annotations=enhanced_annotations,
        put_subset="train"
    )

    # -- save the merged interim files (important: lets later runs reuse them) --
    merged_ann_path = os.path.join(interim_dir, "cls_annotations_merged.json")
    merged_split_path = os.path.join(interim_dir, "split_merged.json")
    save_json(merged_annotations, merged_ann_path)
    save_json(merged_split, merged_split_path)
    logger.success(f"已保存合并版中间件: {merged_ann_path} / {merged_split_path}")

    # ===== 4) build Alpaca QA data from the MERGED version =====

    prompt_constructor = PromptConstructor(all_cn_diseases,seed=42,diease_relation_map='./configs/diease_relation.json')

    simpleQA_train= []
    qa_type_counter = Counter()

    use_annotations = merged_annotations
    use_split = merged_split

    for subset, imgs in use_split.items():
        if subset == 'test':
            continue  # the test split is reserved for later, separate processing
        for img_name in tqdm(imgs, desc=f"生成QA数据-{subset}"):
            item = use_annotations.get(img_name)
            if not item:
                continue
            label = item.get("label")      # List[str] or None
            quality = item.get("quality")  # str or None

            # skip samples that carry no label of any kind
            if not label and not quality:
                continue

            # new-style interface: returns a dict of question-type -> QA pairs
            qa_dict = prompt_constructor.construct_prompts(true_diag=label, quality_text=quality)

            # flatten every question type into uniform QA entries, e.g.
            # { "quality": [...], "judge": [...], "compare": [...], "diagnosis": [...], "quality+diagnosis": [...], "choice": [..] }
            for q_type, qa_list in qa_dict.items():
                for question, answer in qa_list:
                    qa_type_counter[q_type] += 1
                    multimodal_item = {
                        "question_type": q_type,
                        "instruction": "<image>" + question,
                        "input": "",
                        "output": answer,
                        "images": [item["image_path"]]
                    }
                    if subset == "train":
                        simpleQA_train.append(multimodal_item)

    # shuffle the assembled training set
    random.shuffle(simpleQA_train)

    # visualize a few randomly sampled entries
    vis_n = min(3, len(simpleQA_train))
    if vis_n > 0:
        samples = random.sample(simpleQA_train, k=vis_n)
        print("=" * 60)
        print(f"🎯 随机抽取 {vis_n} 条训练样本可视化：")
        print("=" * 60)
        for i, it in enumerate(samples, 1):
            print(f"\n📘 样本 {i}:")
            print(f"🖼️  图片路径: {it['images'][0]}")
            print(f"🧩  指令: {it['instruction']}")
            print(f"📝  输出: {it['output']}")
            print("-" * 60)

    # ================= question-type distribution statistics =================
    total_qa = sum(qa_type_counter.values())
    logger.info(f"simpleQA: train={len(simpleQA_train)}")

    # detailed per-type counts and ratios
    logger.info("各题型生成数量统计（绝对数 & 占比）:")
    for q_type, count in qa_type_counter.items():
        ratio = (count / total_qa) * 100 if total_qa > 0 else 0
        logger.info(f"  - {q_type:<18}: {count:>6}  ({ratio:5.2f}%)")

    # ================= save the main Alpaca data =================
    save_json(simpleQA_train, os.path.join(alpaca_save_dir, "med_pub_simpleQA_train.json"))

    # === update dataset_info.json ===
    data_info = {
        "med_pub_simpleQA_train": {
            "file_name": "med_pub_simpleQA_train.json",
            "columns": {
                "prompt": "instruction",
                "query": "input",
                "response": "output",
                "images": "images"
            }
        }
    }

    merge_dataset_info(alpaca_save_dir, data_info)
    logger.success("✅ 全部Alpaca格式SFT数据集已分开输出完毕。")

# ===================== 主入口 =====================

if __name__ == "__main__":
    cli = parse_args()
    # run the full gather -> augment -> merge -> Alpaca pipeline
    build_multimodal_dataset(
        data_root=cli.data_root,
        disease_map_path=cli.disease_map_path,
        quality_map_path=cli.quality_map_path,
        interim_dir=cli.interim_dir,
        alpaca_save_dir=cli.alpaca_save_dir,
        test_ratio=cli.test_ratio,
        log_dir=cli.log_dir,
        # augmentation-related options
        enhanced_save_dir=cli.enhanced_dir,
        enhance_count_strategy=cli.enhance_strategy,
        enhance_per_image_k=cli.enhance_per_k,
        enhance_seed=cli.seed,
        enhance_force_rebuild=cli.enhance_force_rebuild
    )