# -*- coding: utf-8 -*-
"""
文件名: merge_detection_annotations.py
功能:
  - 读取原始 data_path 对应的 detection/annotations.json 文件；
  - 若指定 from_back_up，则从该备份路径读取数据，不再执行备份；
  - 同时读取 ajust_dir 中的 classification/train.csv 与 detection/annotations.json；
  - 将分类结果与检测结果合并到新的结构中；
  - 最终更新 data_path 中各 image_name 对应的 diagnosis 字段并保存；
  - 在数据目录下自动备份原始 JSON，文件名添加 _back_up_date。

使用样例:
    python merge_detection_annotations.py \
        --data_path ./20250928/data/detection/annotations.json \
        --ajust_dir /mnt/d/aier_orignal \
        --back_up_date 20251016 \
        --from_back_up ""

说明:
    - train.csv 为医院标注命名方式，保留原文件名但不代表训练集；
    - 质量标签已在上游流程筛除，此处不处理；
    - classification 中 “无AMD” 视为 “正常眼底”；
    - 若分类或检测缺失，直接 raise 以保证数据一致性。
"""

import os
import json
import argparse
import shutil
import csv
from collections import defaultdict
from PIL import Image


def parse_args():
    """Build and parse the CLI arguments for the annotation-merge script.

    All flags are plain strings; help texts and defaults are kept verbatim.
    """
    # (flag, default, help) — registered in a loop since every option is str-typed.
    option_table = [
        ("--data_path",
         "/home/zhangpinglu/data0/gy/Dataset/aier_processed/annotations.json",
         "主标注文件路径，例如 ./20250928/data/detection/annotations.json"),
        ("--ajust_dir",
         "/home/zhangpinglu/data0/gy/Dataset/aier_adjust/aier_adjust0928",
         "增补标注的路径，包含 classification 和 detection 两个文件夹"),
        ("--back_up_date",
         "20251016",
         "备份日期（如 20251016），用于生成备份文件名"),
        ("--from_back_up",
         "/home/zhangpinglu/data0/gy/Dataset/aier_processed/annotations_20251016.json",
         "若指定此路径，则从该备份文件读取，不再执行备份逻辑"),
    ]
    parser = argparse.ArgumentParser(description="绘制目标检测结果并合并分类标注")
    for flag, default_value, help_text in option_table:
        parser.add_argument(flag, type=str, default=default_value, help=help_text)
    return parser.parse_args()


def draw_annotations(args):
    """Merge classification (CSV) and detection (COCO JSON) annotations into
    the master annotation file at ``args.data_path``.

    Steps:
      1. Load the master JSON — from ``args.from_back_up`` if given, otherwise
         from ``args.data_path`` after copying a dated backup next to it.
      2. Read ``<ajust_dir>/classification/train.csv`` and map the one-hot AMD
         columns to Chinese diagnosis strings ("无AMD" counts as normal fundus).
      3. Read ``<ajust_dir>/detection/annotations.json``, normalize each bbox by
         the image's crop box from the master data, and attach per-label boxes.
      4. Write the merged ``diagnosis`` field back; drop images re-labelled as
         normal fundus or flagged for removal, then save in place.

    Args:
        args: parsed CLI namespace with ``data_path``, ``ajust_dir``,
            ``back_up_date`` and ``from_back_up`` string attributes.

    Raises:
        FileNotFoundError: if any required input file is missing.
        ValueError: if a detection image has no classification record, or a
            classified image is absent from the master data.
    """
    # ============ Step 1. load master data ============ #
    read_path = args.from_back_up if args.from_back_up else args.data_path
    if not os.path.exists(read_path):
        raise FileNotFoundError(f"找不到输入文件: {read_path}")

    with open(read_path, "r", encoding="utf-8") as f:
        data_dict = json.load(f)

    # Back up the original file unless we are already reading from a backup.
    if not args.from_back_up:
        backup_path = args.data_path.replace(".json", f"_{args.back_up_date}.json")
        shutil.copy(args.data_path, backup_path)
        print(f"✅ 已备份原始数据到: {backup_path}")

    # ============ Step 2. load classification labels ============ #
    csv_path = os.path.join(args.ajust_dir, "classification", "train.csv")
    if not os.path.exists(csv_path):
        raise FileNotFoundError(f"分类文件不存在: {csv_path}")

    new_data = {}
    rm_list = []  # images flagged as bad quality / unlabelable -> to be removed
    with open(csv_path, "r", encoding="utf-8") as f:
        reader = csv.reader(f)
        header = next(reader)  # header row: file name, user id, then label columns
        for row in reader:
            if not row or len(row) < 3:
                continue
            img_name = row[0].strip()

            labels = row[2:]  # skip file name and user_id columns
            # Map checked (== "1") AMD columns to Chinese diagnosis text.
            chinese_labels = []
            for col_name, val in zip(header[2:], labels):
                if val != "1":
                    continue
                if "无AMD" in col_name:
                    chinese_labels.append("正常眼底")
                elif "早期AMD" in col_name:
                    chinese_labels.append("早期年龄相关黄斑变性")
                elif "中期AMD" in col_name:
                    chinese_labels.append("中期年龄相关黄斑变性")
                elif "进展期新生血管性AMD" in col_name:
                    chinese_labels.append("进展期新生血管性黄斑变性")
                elif "进展期地图样萎缩AMD" in col_name:
                    chinese_labels.append("进展期地图样萎缩黄斑变性")
                elif "图片质量差" in col_name:  # never triggered upstream; kept for safety
                    rm_list.append(img_name)
                elif "病灶难以标注" in col_name:
                    rm_list.append(img_name)

            if not chinese_labels:
                # NOTE(review): an image whose only checked columns are
                # quality/unlabelable flags lands in rm_list but not in
                # new_data, so Step 4 below never deletes it from data_dict —
                # confirm this is intended.
                continue
            new_data[img_name] = {"classification": {"text": chinese_labels}}

    # ============ Step 3. load and attach detection boxes ============ #
    detect_path = os.path.join(args.ajust_dir, "detection", "annotations.json")
    if not os.path.exists(detect_path):
        raise FileNotFoundError(f"检测标注文件不存在: {detect_path}")

    with open(detect_path, "r", encoding="utf-8") as f:
        anno = json.load(f)

    images = {img["id"]: img for img in anno["images"]}
    categories = {cat["id"]: cat["name"] for cat in anno["categories"]}

    # Group detection annotations by image_id.
    image2anns = defaultdict(list)
    for ann in anno["annotations"]:
        image2anns[ann["image_id"]].append(ann)

    for image_id, anns in image2anns.items():
        file_name = images[image_id]["file_name"]

        # Normalize boxes by the crop box stored in the master data.
        # crop_box order: top, bottom, left, right.
        top, bottom, left, right = data_dict[file_name]["crop_info"]["crop_box"]
        record = defaultdict(list)
        for ann in anns:
            x, y, w, h = ann["bbox"]  # COCO bbox format [x, y, w, h]
            x1 = (x - left) / (right - left)
            y1 = (y - top) / (bottom - top)
            x2 = (x + w - left) / (right - left)
            y2 = (y + h - top) / (bottom - top)
            record[categories[ann["category_id"]]].append([x1, y1, x2, y2])

        if file_name not in new_data:
            print(json.dumps(record, ensure_ascii=False, indent=2))
            raise ValueError(f"检测文件 {file_name} 未在分类文件中找到对应记录。")
        new_data[file_name]["detection"] = dict(record)

    # ============ Step 4. write back into data_dict ============ #
    updated_count = 0
    for img_name, item in new_data.items():
        if img_name not in data_dict:
            raise ValueError(f"{img_name} 不存在于主数据 data_path 中。")
        # This dataset is mostly abnormal; an image later corrected to normal
        # usually means a borderline lesion, so such entries are dropped.
        if item["classification"]["text"][0] == "正常眼底":
            # BUGFIX: was `file_name` — a stale variable from the detection
            # loop above (wrong image printed; NameError if that loop was empty).
            print(f"跳过正常眼底: {img_name}")
            del data_dict[img_name]
            continue
        # BUGFIX: was `if file_name in rm_list` — tested the wrong image, so
        # rm_list entries were effectively never removed here.
        if img_name in rm_list:
            print(f"跳过移除文件: {img_name}")
            del data_dict[img_name]
            continue
        data_dict[img_name]["diagnosis"] = item
        updated_count += 1

    # ============ Step 5. save in place ============ #
    with open(args.data_path, "w", encoding="utf-8") as f:
        json.dump(data_dict, f, ensure_ascii=False, indent=2)
    print(f"✅ 已更新 {updated_count} 条记录并保存至 {args.data_path}")


if __name__ == "__main__":
    # Script entry point: parse CLI options, then run the merge pipeline.
    draw_annotations(parse_args())
