# -*- coding: utf-8 -*-
"""
<thinking>
 <region>(0.1234,0.2345,0.3456,0.4567)</region>
 <description>观察到xxx玻璃膜疣</description>
 <analyse>观察到xxx符合amd早期症状，但xx并没发生在黄斑</analyse>
</thinking>
<answer> 综上分析，诊断为黄斑水肿 </answer>

Backbone model candidates: medgemma, deepseek-med
生成 raw CoT Alpaca 数据（占位 analyse，无链路推理；region+description 来自词典）
Author: zym1105
Date: 2025-10-10

"""

import os
import json
import random
from typing import Dict, List, Any, Tuple, Optional
from collections import defaultdict
from loguru import logger


# ---------------- 工具函数 ---------------- #

def read_json(p: str) -> Any:
    """Load and return the JSON content stored at path *p* (UTF-8)."""
    with open(p, encoding="utf-8") as fh:
        return json.load(fh)

def save_json(obj: Any, p: str) -> None:
    """Serialize *obj* as pretty-printed UTF-8 JSON to path *p*.

    Parent directories are created on demand. A bare filename (no
    directory component) is written to the current working directory —
    the original `os.makedirs(os.path.dirname(p))` raised
    FileNotFoundError on the empty dirname in that case.
    """
    parent = os.path.dirname(p)
    if parent:  # os.makedirs("") raises; skip when p has no directory part
        os.makedirs(parent, exist_ok=True)
    with open(p, "w", encoding="utf-8") as f:
        json.dump(obj, f, ensure_ascii=False, indent=2)
    logger.info(f"已保存: {p}")

def ensure_list_labels(lbl) -> List[str]:
    """Normalize a raw label field into a list of non-empty strings.

    Accepts a list (items are stringified; blank ones dropped), a single
    string (stripped; dropped if empty), or anything else (-> []).
    """
    if isinstance(lbl, list):
        return [str(item) for item in lbl if str(item).strip()]
    if isinstance(lbl, str):
        stripped = lbl.strip()
        return [stripped] if stripped else []
    return []

def format_box_xyxy(box: List[float], ndigits: int = 4) -> str:
    """Render box [x1,y1,x2,y2] (normalized coords in [0,1]) as '(x1,y1,x2,y2)'.

    Each coordinate is rounded to *ndigits* decimal places; trailing
    zeros are not padded (plain Python round/str semantics).
    """
    x1, y1, x2, y2 = box
    rounded = (round(float(v), ndigits) for v in (x1, y1, x2, y2))
    return "({},{},{},{})".format(*rounded)

def pick_split_keys(split: Dict[str, List[str]]) -> Tuple[List[str], List[str]]:
    """Extract (train_keys, test_keys) from a split mapping.

    Supports two layouts:
      - plain: 'train' / 'test'
      - SFT/RL: 'train_sft' / 'train_rl' / 'test'
        (only 'train_sft' and 'test' are consumed here)
    """
    train_source = "train_sft" if "train_sft" in split else "train"
    return list(split.get(train_source, [])), list(split.get("test", []))


# ---------------- CoT 生成器 ---------------- #

class CoTGenerator:
    """Build <thinking>/<answer> XML blocks for one image.

    For every detection box a paired <region>/<description> line is
    emitted (descriptions looked up in a lesion -> Chinese-text
    dictionary); <analyse> stays an empty placeholder — no reasoning
    chain is produced — and <answer> carries the diagnosis labels.
    """

    def __init__(self, lesion_desc_map: Dict[str, str]):
        # Dictionary: Chinese lesion name -> short Chinese description.
        self.desc_map = dict(lesion_desc_map or {})

    def _desc_of(self, lesion_name_cn: str) -> str:
        # Fall back to the lesion name itself so we never emit an empty string.
        return self.desc_map.get(lesion_name_cn, lesion_name_cn)

    def build_thinking_xml(self, detection: Dict[str, List[List[float]]]) -> str:
        """detection: { lesion_cn: [[x1,y1,x2,y2], ...], ... }"""
        parts = ["<thinking>"]
        # Deterministic ordering: lesion names sorted, boxes kept in
        # their given order, so output is reproducible run-to-run.
        for name in sorted(detection):
            for box in detection.get(name) or []:
                parts.append(f"    <region>{format_box_xyxy(box)}</region>")
                parts.append(f"    <description>{self._desc_of(name)}</description>")
        parts.append("    <analyse></analyse>")
        parts.append("</thinking>")
        return "\n".join(parts)

    @staticmethod
    def build_answer_xml(labels: List[str]) -> str:
        label_text = "、".join(labels) if labels else "未标注"
        return f"<answer> 综上分析，诊断为{label_text} </answer>"

    def build_output_text(self, detection: Dict[str, List[List[float]]], labels: List[str]) -> str:
        return "\n".join([self.build_thinking_xml(detection), self.build_answer_xml(labels)])


# ---------------- 数据收集与筛选 ---------------- #

def _merge_two_annos(aier_ann_path: str, medpub_ann_path: str) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Load annotations.json for both sources; return (aier, medpub).

    Each entry is expected to contain at least image_name and
    image_path, with detection and label optional.
    """
    return read_json(aier_ann_path), read_json(medpub_ann_path)

def _filter_by_split(
    ann: Dict[str, Any],
    split_keys: List[str],
    need_detection: bool,
    exclude_normal: bool
) -> Dict[str, Any]:
    """Select samples from *ann* restricted to *split_keys*.

    Optionally require a non-empty 'detection' field, and/or drop
    samples whose labels include '正常眼底' (normal fundus).
    Returns {image_name: item}.
    """
    selected: Dict[str, Any] = {}
    for name in split_keys:
        if name not in ann:
            continue
        item = ann[name]
        if need_detection and not item.get("detection"):
            continue
        if exclude_normal and "正常眼底" in ensure_list_labels(item.get("label")):
            continue
        selected[name] = item
    return selected

def _pool_normals_from_medpub(
    medp: Dict[str, Any],
    split_keys: List[str]
) -> List[str]:
    """Collect MedPub normal-fundus image names within *split_keys*.

    A sample qualifies when it carries at least one label and every
    label equals '正常眼底'; no detection field is required.
    Returns a list of image_name strings.
    """
    normals: List[str] = []
    for name in split_keys:
        item = medp.get(name)
        if not item:
            continue
        label_list = ensure_list_labels(item.get("label"))
        if label_list and set(label_list) == {"正常眼底"}:
            normals.append(name)
    return normals


# ---------------- 主构建流程 ---------------- #

def build_cot_data(
    aier_ann_path: str,
    medpub_ann_path: str,
    split_path: str,
    lesion_description_path: str,
    out_dir: str,
    normal_ratio: float = 0.2,
    seed: int = 42
) -> None:
    """Generate raw_cot_alpaca_train/test.json plus a provenance index.

    Selection rules:
      - only image_names listed in the split file are used
      - Aier/MedPub lesion samples must have detection + label, and the
        label must not be '正常眼底' (normal fundus)
      - extra MedPub normal-fundus samples (no detection required) are
        drawn; count = len(Aier lesion subset) * normal_ratio

    Args:
        aier_ann_path: path to the Aier annotations.json.
        medpub_ann_path: path to the MedPub annotations.json.
        split_path: split JSON with 'train'/'test' (or 'train_sft'/'test').
        lesion_description_path: JSON dict of lesion name -> description.
        out_dir: output directory for the generated JSON files.
        normal_ratio: normal-fundus sampling ratio vs. Aier lesion count.
        seed: seed for the private RNG (reproducible shuffles).

    Raises:
        ValueError: if any split key is absent from both annotation sources.
    """
    os.makedirs(out_dir, exist_ok=True)
    rng = random.Random(seed)  # private RNG: global random state stays untouched

    # Load both annotation sources, the split, and the lesion dictionary.
    aier, medp = _merge_two_annos(aier_ann_path, medpub_ann_path)
    split_obj = read_json(split_path)
    lesion_desc = read_json(lesion_description_path)

    # Resolve split keys (handles both plain and SFT/RL split layouts).
    train_keys, test_keys = pick_split_keys(split_obj)

    # Every split key must exist in at least one source (aier or medpub).
    merged_index = set(aier.keys()) | set(medp.keys())
    missing = [k for k in (train_keys + test_keys) if k not in merged_index]
    if missing:
        raise ValueError(f"split 中存在 {len(missing)} 个 image_name 不在任一 annotations 中，例如: {missing[:5]}")

    # --------- Training set ---------
    # Aier: has detection, has label, not normal fundus.
    train_aier_pos = _filter_by_split(aier, train_keys, need_detection=True, exclude_normal=True)
    # MedPub: same criteria (supplementary lesion samples).
    train_medp_pos = _filter_by_split(medp, train_keys, need_detection=True, exclude_normal=True)

    # Target normal-fundus count (computed from the Aier lesion count only).
    target_normals_train = int(len(train_aier_pos) * normal_ratio)

    # MedPub normal-fundus pool (detection not required).
    train_medp_norm_pool = _pool_normals_from_medpub(medp, train_keys)
    rng.shuffle(train_medp_norm_pool)
    train_medp_norm = train_medp_norm_pool[:target_normals_train]
    if len(train_medp_norm) < target_normals_train:
        logger.warning(f"[train] 目标正常眼底 {target_normals_train}，但可用 {len(train_medp_norm)}。")

    # Merge training candidates (dict.fromkeys keeps order, drops duplicates).
    train_keys_final = list(dict.fromkeys(list(train_aier_pos.keys()) +
                                          list(train_medp_pos.keys()) +
                                          train_medp_norm))

    # --------- Test set (same procedure) ---------
    test_aier_pos = _filter_by_split(aier, test_keys, need_detection=True, exclude_normal=True)
    test_medp_pos = _filter_by_split(medp, test_keys, need_detection=True, exclude_normal=True)

    target_normals_test = int(len(test_aier_pos) * normal_ratio)
    test_medp_norm_pool = _pool_normals_from_medpub(medp, test_keys)
    rng.shuffle(test_medp_norm_pool)
    test_medp_norm = test_medp_norm_pool[:target_normals_test]
    if len(test_medp_norm) < target_normals_test:
        logger.warning(f"[test] 目标正常眼底 {target_normals_test}，但可用 {len(test_medp_norm)}。")

    test_keys_final = list(dict.fromkeys(list(test_aier_pos.keys()) +
                                         list(test_medp_pos.keys()) +
                                         test_medp_norm))

    logger.info(f"汇总训练样本: 病灶(Aier)={len(train_aier_pos)}, 病灶(MedPub)={len(train_medp_pos)}, 正常(MedPub)={len(train_medp_norm)}, 合计={len(train_keys_final)}")
    logger.info(f"汇总测试样本: 病灶(Aier)={len(test_aier_pos)}, 病灶(MedPub)={len(test_medp_pos)}, 正常(MedPub)={len(test_medp_norm)}, 合计={len(test_keys_final)}")

    # Build the CoT text generator from the lesion-description dictionary.
    cot = CoTGenerator(lesion_desc)

    # Emit Alpaca-format rows for the given image_name keys.
    def _emit(keys: List[str]) -> List[Dict[str, Any]]:
        bag = []
        for k in keys:
            # Aier takes precedence when a key exists in both sources.
            it = (aier.get(k) or medp.get(k))
            # label (may be absent -> normalized to [])
            labels = ensure_list_labels(it.get("label"))
            # detection (normal-fundus samples typically have none)
            detection = it.get("detection") or {}
            # Assemble the <thinking>/<answer> output text.
            output_text = cot.build_output_text(detection, labels)
            row = {
                "instruction": "<image>请基于图像中关键病灶给出描述与最终诊断。",
                "input": "",
                "output": output_text,
                "images": [it["image_path"]],
                "image_name": k
            }
            bag.append(row)
        return bag

    train_rows = _emit(train_keys_final)
    test_rows  = _emit(test_keys_final)

    # Persist the generated rows.
    save_json(train_rows, os.path.join(out_dir, "raw_cot_alpaca_train.json"))
    save_json(test_rows,  os.path.join(out_dir, "raw_cot_alpaca_test.json"))

    # Record exactly which samples were used, for traceability.
    used = {
        "train_used": train_keys_final,
        "test_used": test_keys_final,
        "train_detail": {
            "aier_pos": list(train_aier_pos.keys()),
            "medpub_pos": list(train_medp_pos.keys()),
            "medpub_normal": train_medp_norm
        },
        "test_detail": {
            "aier_pos": list(test_aier_pos.keys()),
            "medpub_pos": list(test_medp_pos.keys()),
            "medpub_normal": test_medp_norm
        }
    }
    save_json(used, os.path.join(out_dir, "raw_cot_used_index.json"))
    logger.success("raw CoT Alpaca 数据已生成")


# ---------------- CLI ---------------- #

def parse_args():
    """Build and parse the CLI options for raw CoT Alpaca generation."""
    import argparse
    parser = argparse.ArgumentParser(description="生成 raw CoT Alpaca（占位 analyse，无链路推理）")
    parser.add_argument("--aier_annotations", type=str,
                        default='./experiments/interim/aier_coor_qa/coor_qa_annotations_merged.json',
                        help="Aier 数据集的 annotations.json（合并增强后的版本）")
    parser.add_argument("--medpub_annotations", type=str,
                        default='./experiments/interim/coor_qa/coor_qa_annotations_merged.json',
                        help="MedPub 数据集的 annotations.json")
    parser.add_argument("--split_path", type=str,
                        default='./experiments/interim/aier_coor_qa/split_merged.json',
                        help="split.json（包含 train/test 或 train_sft/test）")
    parser.add_argument("--lesion_description_path", type=str,
                        default="configs/lesion_discription.json",
                        help="病灶中文描述词典 JSON（病灶名->描述）")
    parser.add_argument("--out_dir", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_cot_raw",
                        help="输出目录")
    parser.add_argument("--normal_ratio", type=float, default=0.2,
                        help="额外正常眼底抽取比例 = Aier 病灶量 * ratio")
    parser.add_argument("--seed", type=int, default=42, help="随机种子")
    return parser.parse_args()


if __name__ == "__main__":
    # CLI entry point: parse arguments and run the full build pipeline.
    args = parse_args()
    # Idempotent; build_cot_data creates out_dir again with exist_ok=True.
    os.makedirs(args.out_dir, exist_ok=True)
    build_cot_data(
        aier_ann_path=args.aier_annotations,
        medpub_ann_path=args.medpub_annotations,
        split_path=args.split_path,
        lesion_description_path=args.lesion_description_path,
        out_dir=args.out_dir,
        normal_ratio=args.normal_ratio,
        seed=args.seed
    )
