# -*- coding: utf-8 -*-
"""
文件: src/coor_qa_pipeline.py

目标:
  1) 从 processed 各数据集的 annotations.json 中抽取
     - segmentation -> detection(归一化 bbox)
     - location -> 归一化点
     - 同时遵循全局 split.json (global_split) 来确定 train/test
  2) (可选) 调用增强器对坐标类样本做增强, 并把增强样本合并回 train
  3) 构造“坐标类 QA”问答, 输出 Alpaca SFT 格式 (train/test)
  4) 保存所有中间件(原始注释/合并注释/增强注释/合并 split)

依赖:
  - src/utils/seg2dect.py    (mask -> list[[x1,y1,x2,y2]] 像素坐标)
  - src/utils/loc_enhance.py (我已提供: CoordImageEnhancer)

注意:
  - mask -> bbox 后进行归一化存储; 生成 QA 时再按图片尺寸转回像素输出
  - 只要图片具有 segmentation 或 location 任一项, 就纳入坐标 QA 语料
  - global_split.json 里应包含 {"train":[...], "test":[...]} (键是 image_name)

作者: zym1105
日期: 2025-10-10
"""

import os,sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import json
import random
from typing import Dict, List, Tuple, Optional, Any

from PIL import Image
from tqdm import tqdm
from collections import Counter, defaultdict

from loguru import logger

from src.utils.seg2dect import seg2dect                      
from src.utils.loc_enhance import CoordImageEnhancer         
from src.utils.coor_qa_prompt_construct import build_box_qa  
from src.utils.coor_qa_prompt_construct import BoxQAGenerator
# ------------------------ 基础工具 ------------------------ 

def read_json(p: str) -> Any:
    """Load and return the JSON content of file *p* (UTF-8)."""
    with open(p, mode="r", encoding="utf-8") as fh:
        return json.load(fh)

def save_json(obj: Any, p: str):
    """Serialize *obj* as pretty-printed UTF-8 JSON to path *p* and log it.

    Fix: ``os.makedirs(os.path.dirname(p))`` raises FileNotFoundError when
    *p* has no directory component (``dirname`` returns ""), so parent
    directories are only created when a parent path actually exists.
    """
    parent = os.path.dirname(p)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(p, "w", encoding="utf-8") as f:
        json.dump(obj, f, ensure_ascii=False, indent=2)
    logger.info(f"已保存: {p}")

def setup_logger(log_dir: str):
    """Create *log_dir* and attach a rotating loguru file sink inside it."""
    os.makedirs(log_dir, exist_ok=True)
    sink_path = os.path.join(log_dir, "coor_qa_pipeline.log")
    logger.add(sink_path, rotation="10 MB", encoding="utf-8", enqueue=True)
    logger.info(f"日志输出: {sink_path}")

def merge_dataset_info(root: str, info: Dict[str, Any]):
    """Merge *info* into ``dataset_info.json`` under *root*, creating it if absent."""
    path = os.path.join(root, "dataset_info.json")
    existing: Dict[str, Any] = {}
    if os.path.exists(path):
        existing = read_json(path)
    existing.update(info)
    save_json(existing, path)
    logger.success("dataset_info.json 已更新")
# ------------------------ 工具: mask -> bbox ------------------------ #

def _seg_to_boxes(mask_path: str) -> List[List[float]]:
    """Convert a segmentation mask file into boxes via ``seg2dect``.

    Returns pixel-coordinate boxes ``[[x1, y1, x2, y2], ...]``. Any failure
    is re-raised as a RuntimeError carrying the offending path.
    (NOTE(review): the original comment mentioned a PIL fallback, but none
    is implemented here.)
    """
    try:
        boxes = seg2dect(mask_path)
    except Exception as exc:
        raise RuntimeError(f"[seg2dect] 解析失败: {mask_path} -> {repr(exc)}")
    return boxes
# ------------------------ 核心 1: 收集数据 ------------------------ #

def _cn_key_or_raise(key_map: Dict[str, str], en_key: str) -> str:
    """Translate an English category key to its Chinese label.

    Raises a descriptive KeyError when *en_key* is unmapped. A mapped value
    of "" means the category is intentionally excluded (e.g. vessel
    segmentation) and the caller should skip it.
    """
    cn_key = key_map.get(en_key, None)
    if cn_key is None:
        raise KeyError(f"[key_map] 未找到 '{en_key}' 的中文映射，请检查 loc_key_map_path")
    return cn_key


def gather_data(
    data_root: str,
    global_split_path: str,
    loc_key_map_path: str,
    diease_map_path: str,
    quality_map_path: str,
    save_dir: str
) -> Tuple[Dict[str, Any], Dict[str, List[str]]]:
    """Collect coordinate-style annotations from every dataset under *data_root*.

    Reads each sub-dataset's ``annotations.json``; any image carrying a
    ``segmentation`` or ``location`` entry is included. Images are assigned
    to train/test according to the global split; images absent from it are
    skipped with a warning.

    Parameters
    ----------
    data_root: processed root containing one sub-directory per dataset.
    global_split_path: JSON with {"train": [...], "test": [...]} of image names.
    loc_key_map_path: JSON mapping English category keys to Chinese labels.
    diease_map_path: JSON mapping English disease labels to Chinese text.
        (parameter name keeps the original "diease" typo for caller compatibility)
    quality_map_path: JSON mapping English quality labels to Chinese text.
    save_dir: directory receiving the intermediate annotation/split files.

    Returns
    -------
    (annotations, split): annotations maps image_name to a record with
    "image_name", "image_path" and optional "detection"/"location"/"label"/
    "quality" fields; split is {"train": [...], "test": [...]} restricted to
    images present in annotations.

    NOTE(review): boxes from seg2dect are stored exactly as returned, while
    the module header claims normalized storage — confirm seg2dect's output
    range.
    """
    logger.info("开始收集坐标类标注数据 ...")
    os.makedirs(save_dir, exist_ok=True)

    # English -> Chinese translation tables for categories / diseases / quality.
    key_map = read_json(loc_key_map_path)
    diease_map = read_json(diease_map_path)
    quality_map = read_json(quality_map_path)

    # Global train/test membership.
    global_split = read_json(global_split_path)
    g_train = set(global_split.get("train", []))
    g_test = set(global_split.get("test", []))

    annotations: Dict[str, Any] = {}
    split: Dict[str, List[str]] = {"train": [], "test": []}

    sub_dirs = [d for d in os.listdir(data_root) if os.path.isdir(os.path.join(data_root, d))]
    for sub in sorted(sub_dirs):
        anno_path = os.path.join(data_root, sub, "annotations.json")
        if not os.path.exists(anno_path):
            continue
        data = read_json(anno_path)
        for image_name, item in data.items():
            diag = item.get("diagnosis", {})
            seg = diag.get("segmentation", {})
            loc = diag.get("location", {})
            class_label_en = diag.get('classification', {}).get('text', {})
            quality_en = diag.get('classification', {}).get('quality', {})
            if not seg and not loc:
                continue

            # Resolve the subset from the global split; unknown images are skipped.
            if image_name in g_train:
                put = "train"
            elif image_name in g_test:
                put = "test"
            else:
                logger.warning(f"[split] {image_name} 未在 global_split 中，已跳过")
                continue

            record = {
                "image_name": image_name,
                "image_path": item["image_path"],
            }

            # segmentation masks -> bounding boxes.
            if seg:
                det_dict = {}
                for en_key, mask_path in seg.items():
                    if not os.path.exists(mask_path):
                        logger.warning(f"[seg] mask 缺失: {mask_path} ({image_name})")
                        continue
                    boxes_xyxy = _seg_to_boxes(mask_path)
                    cn_key = _cn_key_or_raise(key_map, en_key)
                    if len(cn_key) == 0:
                        continue  # intentionally skipped category (vessel segmentation)
                    det_dict.setdefault(cn_key, []).extend(boxes_xyxy)
                if det_dict:
                    record["detection"] = det_dict

            # location -> points (expected to already be normalized 0~1).
            if loc:
                loc_dict = {}
                for en_key, pts in loc.items():
                    cn_key = _cn_key_or_raise(key_map, en_key)
                    if len(cn_key) == 0:
                        continue  # intentionally skipped category
                    loc_dict.setdefault(cn_key, []).extend(pts)
                if loc_dict:
                    record["location"] = loc_dict

            # Classification label / quality: fail loudly on unmapped keys
            # (consistent with the key_map handling above) instead of letting
            # direct indexing raise a bare, message-less KeyError.
            if class_label_en:
                if class_label_en not in diease_map:
                    raise KeyError(f"[diease_map] 未找到 '{class_label_en}' 的中文映射，请检查 diease_map_path")
                text_cn = diease_map[class_label_en]
                if text_cn:  # empty value means intentionally skipped
                    record['label'] = text_cn  # NOTE: this value is a list
            if quality_en:
                if quality_en not in quality_map:
                    raise KeyError(f"[quality_map] 未找到 '{quality_en}' 的中文映射，请检查 quality_map_path")
                quality_cn = quality_map[quality_en]
                if quality_cn:  # guard kept for future extension
                    record['quality'] = quality_cn

            # Keep only images that yielded at least one coordinate field.
            if ("detection" in record) or ("location" in record):
                annotations[image_name] = record
                split[put].append(image_name)

    # Persist the raw coordinate annotations and the filtered split.
    ann_path = os.path.join(save_dir, "coor_qa_annotations.json")
    split_path = os.path.join(save_dir, "coor_qa_split.json")
    save_json(annotations, ann_path)
    save_json(split, split_path)
    logger.success(f"收集完成: 共 {len(annotations)} 张; train={len(split['train'])}, test={len(split['test'])}")
    return annotations, split

# ------------------------ 核心 2: 生成增强并合并 ------------------------ #

def build_enhanced_coor_qa(
    base_annotations_path: str,
    enhanced_save_dir: str,
    count_strategy: str = "random",
    per_image_k: int = 2,
    op_probs: Optional[Dict[str, float]] = None,
    seed: int = 42,
    force_rebuild: bool = False
) -> Dict[str, Any]:
    """Generate augmented coordinate samples with ``CoordImageEnhancer``.

    Reads the base annotations, augments every record carrying detection or
    location data, writes the augmented annotations to
    ``enhanced_save_dir/annotations.json`` and returns
    ``{"annotations": <dict>, "stats": {"base_images", "enhanced_images"}}``.
    """
    if force_rebuild and os.path.exists(enhanced_save_dir):
        import shutil
        shutil.rmtree(enhanced_save_dir)
        logger.warning(f"[增强] 已删除旧目录: {enhanced_save_dir}")

    os.makedirs(enhanced_save_dir, exist_ok=True)
    base = read_json(base_annotations_path)

    enhancer = CoordImageEnhancer(
        save_dir=enhanced_save_dir,
        count_strategy=count_strategy,
        per_image_k=per_image_k,
        op_probs=op_probs,
        seed=seed
    )

    enhanced: Dict[str, Any] = {}
    total_base = 0
    total_new = 0

    for name, rec in tqdm(base.items(), desc="增强样本"):
        total_base += 1
        det = rec.get("detection")
        loc = rec.get("location")
        if not (det or loc):
            continue

        produced = enhancer.enhance_one(
            image_path=rec["image_path"],
            image_name=rec["image_name"],
            detection_dict=det,
            location_dict=loc,
            source_dataset=rec.get("source_dataset"),
            label=rec.get("label"),
            quality=rec.get("quality")
        )
        enhanced.update({item["image_name"]: item for item in produced})
        total_new += len(produced)

    # Persist the augmented annotations next to the augmented images.
    save_json(enhanced, os.path.join(enhanced_save_dir, "annotations.json"))
    return {
        "annotations": enhanced,
        "stats": {"base_images": total_base, "enhanced_images": total_new},
    }

def merge_enhanced_into_base(
    base_annotations: Dict[str, Any],
    base_split: Dict[str, List[str]],
    enhanced_annotations: Dict[str, Any],
    put_subset: str = "train"
) -> Tuple[Dict[str, Any], Dict[str, List[str]]]:
    """Merge augmented samples into the base annotations/split.

    Every augmented key is appended to *put_subset* (default "train").
    The input dicts are not mutated; shallow copies are returned.
    """
    merged_ann = {**base_annotations, **enhanced_annotations}
    merged_split = {
        "train": list(base_split.get("train", [])),
        "test": list(base_split.get("test", [])),
    }

    new_keys = list(enhanced_annotations.keys())
    merged_split.setdefault(put_subset, []).extend(new_keys)
    logger.info(f"[合并] 向 {put_subset} 追加增强样本 {len(new_keys)} 条")
    return merged_ann, merged_split



# ------------------------ 主流程 ------------------------ #

def build_multimodal_dataset(
    data_root: str,
    loc_key_map_path: str,
    global_split_path: str,
    interim_dir: str,
    alpaca_save_dir: str,
    # augmentation options
    enable_enhance: bool = True,
    enhanced_save_dir: str = "./experiments/dataset_enhanced/coor_qa",
    enhance_count_strategy: str = "random",
    enhance_per_image_k: int = 2,
    enhance_op_probs: Optional[Dict[str, float]] = None,
    enhance_seed: int = 42,
    enhance_force_rebuild: bool = False,
    # misc
    log_dir: str = "./experiments/log"
) -> None:
    """Run the full coordinate-QA pipeline.

    Steps:
      1) gather_data -> interim_dir/{coor_qa_annotations.json, coor_qa_split.json}
      2) optional augmentation -> enhanced_save_dir/annotations.json
      3) merge augmented samples -> interim_dir/{coor_qa_annotations_merged.json, split_merged.json}
      4) emit Alpaca SFT rows -> alpaca_save_dir/med_pub_coor_qa_train.json
         (the test subset is deliberately skipped and left for later processing)
    """
    setup_logger(log_dir)
    os.makedirs(interim_dir, exist_ok=True)
    os.makedirs(alpaca_save_dir, exist_ok=True)

    # 1) Gather coordinate annotations following the global split.
    # NOTE(review): the disease/quality map paths are hard-coded here and not
    # exposed through the CLI — confirm this is intentional.
    ann, split = gather_data(
        data_root=data_root,
        global_split_path=global_split_path,
        loc_key_map_path=loc_key_map_path,
        save_dir=interim_dir,
        diease_map_path="./configs/diseases_discription.json",
        quality_map_path="./configs/quality_discription.json"
    )

    # 2) Optional augmentation; augmented samples are merged into train only.
    if enable_enhance:
        enh = build_enhanced_coor_qa(
            base_annotations_path=os.path.join(interim_dir, "coor_qa_annotations.json"),
            enhanced_save_dir=enhanced_save_dir,
            count_strategy=enhance_count_strategy,
            per_image_k=enhance_per_image_k,
            # default op probabilities: horizontal/vertical flip, both, rot90
            op_probs=(enhance_op_probs or {"h":0.4,"v":0.4,"vh":0.15,"rot90":0.05}),
            seed=enhance_seed,
            force_rebuild=enhance_force_rebuild
        )
        enhanced_ann = enh["annotations"]
        logger.info(f"增强完成: {enh['stats']}")
        merged_ann, merged_split = merge_enhanced_into_base(
            base_annotations=ann,
            base_split=split,
            enhanced_annotations=enhanced_ann,
            put_subset="train"
        )
    else:
        merged_ann, merged_split = ann, split

    # Persist the merged intermediates.
    merged_ann_path = os.path.join(interim_dir, "coor_qa_annotations_merged.json")
    merged_split_path = os.path.join(interim_dir, "split_merged.json")
    save_json(merged_ann, merged_ann_path)
    save_json(merged_split, merged_split_path)
    logger.success("合并版中间件已保存")

    # 3) Build Alpaca SFT rows.

    coor_qa_train = []
    qa_type_counter = Counter()  # per-question-type counts, logged at the end
    gen = BoxQAGenerator(
            iou_neg_threshold=0.05,
            black_ratio_threshold=0.20,
            neg_trials=5,
            rng_seed=42
        )
    for subset, keys in merged_split.items():
        if subset == "test":
            continue  # the test subset is reserved for separate processing
        for k in tqdm(keys, desc=f"生成QA-{subset}"):
            item = merged_ann.get(k)
            if not item:
                continue

            image_path = item["image_path"]
            # detection boxes -> QA triples via build_box_qa
            det = item.get("detection", {})
            triples: List[Tuple[str, str, str]] = []
            for cn_key, norm_boxes in det.items():
                if not norm_boxes:
                    continue
                triples.extend(
                    build_box_qa(
                        gen=gen,
                        label_cn=cn_key,
                        bboxes_norm=norm_boxes,
                        image_path=image_path,
                        num_pos=2,                 # positive judgment questions per label
                        num_neg=2,                 # negative judgment questions per label
                    )
                )

            # location -> point-localization QA (extend here for more point tasks)
            loc = item.get("location", {})
            if loc:
                with Image.open(image_path) as im:
                    W, H = im.size
                
                for cn_key, norm_pts in loc.items():
                    if not norm_pts:
                        continue
                    # Currently each location entry is a single [x, y] point,
                    # so it is wrapped into a one-element list; the multi-point
                    # branch below is therefore effectively unreachable today.
                    norm_pts=[norm_pts]
                    pts_px = [[int(round(x*W)), int(round(y*H))] for x,y in norm_pts]
                    if len(pts_px) == 1:
                        q = f"请给出图中“{cn_key}”的大致像素坐标位置。"
                        a = f"位置坐标：[x,y]={pts_px[0]}。"
                    else:
                        q = f"图中包含多个“{cn_key}”位置，请给出这些像素坐标。"
                        a = f"坐标共有 {len(pts_px)} 个：" + "; ".join([str(p) for p in pts_px]) + "。"
                    triples.append((q, a, "point"))

            # Emit one Alpaca row per QA triple.
            for q, a, qtype in triples:
                qa_type_counter[qtype] += 1
                row = {
                    "instruction": "<image>" + q,
                    "input": "",
                    "output": a,
                    "images": [image_path],
                }
                if subset == "train":
                    coor_qa_train.append(row)
    # Print a few random training samples for manual inspection.
    vis = min(3, len(coor_qa_train))
    if vis > 0:
        samp = random.sample(coor_qa_train, vis)
        print("=" * 60)
        print(f"🎯 随机抽取 {vis} 条训练样本可视化：")
        print("=" * 60)
        for i, it in enumerate(samp, 1):
            print(f"\n📘 样本 {i}")
            print(f"🖼️  {it['images'][0]}")
            print(f"🧩  指令: {it['instruction']}")
            print(f"📝  答案: {it['output']}")
            print("-" * 60)

    logger.info(f"coor_qa: train={len(coor_qa_train)}")
    logger.info("各题型统计: " + str(dict(qa_type_counter)))

    # Save the Alpaca training file.
    out_train = os.path.join(alpaca_save_dir, "med_pub_coor_qa_train.json")
    save_json(coor_qa_train, out_train)

    # Register the output file and its column mapping in dataset_info.json.
    info = {
        "med_pub_coor_qa_train": {
            "file_name": "med_pub_coor_qa_train.json",
            "columns": {"prompt":"instruction","query":"input","response":"output","images":"images"}
        }
    }
    merge_dataset_info(alpaca_save_dir, info)
    logger.success("全部 Alpaca SFT 数据输出完成")

# ------------------------ CLI ------------------------ #

def parse_args():
    """Define and parse the command-line interface for the pipeline."""
    import argparse
    parser = argparse.ArgumentParser(description="坐标类 QA 构建流水线")
    # input/output paths
    parser.add_argument("--data_root", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_processed",
                        help="processed 根目录(含多个子数据集, 每个子目录里有 annotations.json)")
    parser.add_argument("--global_split_path", type=str,
                        default='/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/dataset_support/split.json',
                        help="全局 split.json (包含 image_name 的 train/test 划分)")
    parser.add_argument("--loc_key_map_path", type=str,
                        default='./configs/lesion_map.json',
                        help="定位/检测类别的英文->中文映射 json")
    parser.add_argument("--interim_dir", type=str,
                        default="./experiments/interim/coor_qa",
                        help="保存原始/合并注释与 split 的中间件路径")
    parser.add_argument("--alpaca_save_dir", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data",
                        help="输出 Alpaca SFT 的保存目录")
    # augmentation options
    parser.add_argument("--enable_enhance", action="store_true", help="是否启用增强")
    parser.add_argument("--enhanced_save_dir", type=str,
                        default="./experiments/dataset_enhanced/coor_qa")
    parser.add_argument("--enhance_strategy", type=str, default="random",
                        choices=["random","fixed"])
    parser.add_argument("--enhance_per_k", type=int, default=2)
    parser.add_argument("--enhance_seed", type=int, default=42)
    parser.add_argument("--enhance_force_rebuild", action="store_true")
    # misc
    parser.add_argument("--log_dir", type=str, default="./experiments/log")
    return parser.parse_args()

if __name__ == "__main__":
    # Script entry point: parse CLI options and run the whole pipeline.
    cli = parse_args()
    build_multimodal_dataset(
        data_root=cli.data_root,
        loc_key_map_path=cli.loc_key_map_path,
        global_split_path=cli.global_split_path,
        interim_dir=cli.interim_dir,
        alpaca_save_dir=cli.alpaca_save_dir,
        enable_enhance=cli.enable_enhance,
        enhanced_save_dir=cli.enhanced_save_dir,
        enhance_count_strategy=cli.enhance_strategy,
        enhance_per_image_k=cli.enhance_per_k,
        enhance_seed=cli.enhance_seed,
        enhance_force_rebuild=cli.enhance_force_rebuild,
        log_dir=cli.log_dir
    )
