import json
from collections import Counter, OrderedDict


def save_classification_stats(annotations, save_path, indent=4):
    """Compute summary statistics for a combined annotations file and save them.

    Tallies, over all images: how many images carry each task type
    (classification / segmentation / detection / location), the distribution
    of classification labels, of segmentation categories and of location
    categories, plus the total image count and the number of images that
    have no "diagnosis" entry at all.

    Parameters
    ----------
    annotations : dict
        Image-level combined annotation info (typically returned by
        gather_data()); maps image name -> annotation dict, where each
        annotation may contain a "diagnosis" sub-dict.
    save_path : str
        Output JSON file path (e.g. experiments.json).
    indent : int, default=4
        JSON indentation level.

    Returns
    -------
    dict
        The statistics dictionary that was also written to ``save_path``.

    Raises
    ------
    RuntimeError
        If the statistics file cannot be written.
    """
    task_counter = Counter()   # number of images carrying each task type
    class_counter = Counter()  # classification "field: value" label counts
    seg_counter = Counter()    # segmentation category counts
    loc_counter = Counter()    # location category counts

    total_images = len(annotations)
    no_diagnosis_count = 0

    # Keys (image names) are irrelevant here; iterate values directly.
    for ann in annotations.values():
        diagnosis = ann.get("diagnosis")
        if diagnosis is None:
            no_diagnosis_count += 1
            continue

        # Task-type distribution: each image counts once per task it carries.
        for task_type in ("classification", "segmentation", "detection", "location"):
            if task_type in diagnosis:
                task_counter[task_type] += 1

        # Classification distribution, keyed as "field: value"
        # (e.g. "text: normal").
        for k, v in diagnosis.get("classification", {}).items():
            class_counter[f"{k}: {v}"] += 1

        # Segmentation / location distributions: only the category names are
        # counted; the associated paths / coordinates are ignored.
        for seg_type in diagnosis.get("segmentation", {}):
            seg_counter[seg_type] += 1
        for loc_type in diagnosis.get("location", {}):
            loc_counter[loc_type] += 1

        # NOTE: a "detection" distribution can be added here once the
        # dataset starts carrying detection annotations.

    def _sorted_desc(counter):
        # Most-frequent-first ordering; plain dicts preserve insertion
        # order (Python 3.7+), so OrderedDict is not needed.
        return dict(sorted(counter.items(), key=lambda item: item[1], reverse=True))

    # The Chinese keys below are part of the emitted JSON schema that
    # downstream consumers rely on — do not translate them.
    stats = {
        "图片总数": total_images,
        "无诊断信息图片数": no_diagnosis_count,
        "题型分布": dict(task_counter),
        "分类分布": _sorted_desc(class_counter),
        "分割类别分布": _sorted_desc(seg_counter),
        "定位类别分布": _sorted_desc(loc_counter),
    }

    try:
        with open(save_path, "w", encoding="utf-8") as f:
            # ensure_ascii=False keeps the Chinese keys human-readable on disk.
            json.dump(stats, f, indent=indent, ensure_ascii=False)
        print(f"[INFO] 统计信息已保存到 {save_path}")
    except Exception as e:
        raise RuntimeError(f"写出统计文件失败: {save_path}, 错误信息: {e}") from e

    return stats
