# -*- coding: utf-8 -*-
"""
文件名: analyze_datasets_stat.py
功能:
  - 汇总 experiments/stat 中的所有统计 json
  - 检查 processed 数据集的完整性
  - 分析多模态与多疾病统计
  - 生成临时统计 experiments/Interim/*.json
"""

import os
import json
import glob
from tqdm import tqdm
from collections import Counter

# Module-level configuration: paths shared by all analysis functions below.
stat_dir = "./experiments/stat"  # directory of per-dataset statistics JSONs
tar_data_dir = "/home/zhangpinglu/data0/gy/Dataset/public_processed"  # root of processed datasets
disease_config = "./configs/diseases_discription.json"  # maps text labels -> Chinese disease name(s)
tmp_dir = "./experiments/Interim"  # output directory for the generated summary JSONs
os.makedirs(tmp_dir, exist_ok=True)


# ========= 基础工具函数 =========
def safe_load_json(path):
    """Best-effort JSON loader.

    Returns the parsed object, or an empty dict after logging an error
    when the file is missing or malformed.  The broad catch is
    deliberate: this script treats unreadable stat files as empty
    instead of aborting the whole analysis.
    """
    try:
        with open(path, "r", encoding="utf-8") as fh:
            raw = fh.read()
        return json.loads(raw)
    except Exception as exc:  # intentional broad catch, see docstring
        print(f"[ERROR] 无法读取 JSON: {path} -> {exc}")
        return {}


# ========= 功能模块 =========
def check_tar_integrity(tar_data_dir):
    """Verify that every file referenced by the processed annotations exists.

    Walks each dataset directory under *tar_data_dir*, loads its
    annotations.json and checks the image path, original path and every
    segmentation mask path of each entry.

    Raises:
        FileNotFoundError: if *tar_data_dir* itself is missing, or on the
            first annotation entry that references a missing file (the
            offending paths are printed before raising).
    """
    if not os.path.exists(tar_data_dir):
        raise FileNotFoundError(f"[ERROR] processed 数据目录不存在: {tar_data_dir}")

    for dataset in sorted(os.listdir(tar_data_dir)):
        anno_path = os.path.join(tar_data_dir, dataset, "annotations.json")
        if not os.path.exists(anno_path):
            # directories without annotations are skipped, not fatal
            print(f"[WARN] 跳过无 annotations.json 的目录: {dataset}")
            continue

        annotations = safe_load_json(anno_path)
        for name, entry in tqdm(annotations.items(), desc=f"Checking {dataset}", unit="img"):
            # image/original paths are only checked when present and truthy;
            # every segmentation mask path is checked unconditionally.
            candidates = []
            image_path = entry.get("image_path")
            if image_path:
                candidates.append(image_path)
            original_path = entry.get("original_path")
            if original_path:
                candidates.append(original_path)
            seg_masks = entry.get("diagnosis", {}).get("segmentation", {})
            candidates.extend(seg_masks.values())

            missing = [p for p in candidates if not os.path.exists(p)]
            if missing:
                print(f"\n[ERROR] 数据完整性异常 in {dataset}/{name}:")
                for m in missing:
                    print(f"   Missing file: {m}")
                raise FileNotFoundError(f"❌ 数据集 {dataset} 存在缺失文件。请修复后再运行。")

    print("✅ 所有 processed 数据完整性检查通过！")


def analyze_stat_jsons(stat_dir):
    """Aggregate the per-dataset stat JSONs into one overall summary.

    Sums the image count and the per-question-type counts across every
    *.json under *stat_dir*, writes the totals to tmp_dir/summary.json
    and returns the resulting dict.
    """
    summary = {
        "total_images": 0,
        "has_classification": 0,
        "has_segmentation": 0,
        "has_location": 0,
    }

    for path in glob.glob(os.path.join(stat_dir, "*.json")):
        data = safe_load_json(path)
        # hoist the question-type distribution lookup out of the three adds
        type_dist = data.get("题型分布", {})
        summary["total_images"] += data.get("图片总数", 0)
        summary["has_classification"] += type_dist.get("classification", 0)
        summary["has_segmentation"] += type_dist.get("segmentation", 0)
        summary["has_location"] += type_dist.get("location", 0)

    total = {"summary": summary}
    save_path = os.path.join(tmp_dir, "summary.json")
    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(total, f, indent=4, ensure_ascii=False)
    print(f"✅ 保存总体统计 -> {save_path}")
    return total


def analyze_text_keys(stat_dir):
    """Collect the distinct annotation vocabularies across all stat JSONs.

    Gathers classification "text:"/"quality:" labels plus segmentation
    and localization key names, writes the sorted vocabularies and their
    sizes to tmp_dir/key_analysis.json, and returns the same dict.
    """
    text_labels = set()
    quality_labels = set()
    seg_keys = set()
    loc_keys = set()

    for path in glob.glob(os.path.join(stat_dir, "*.json")):
        data = safe_load_json(path)

        # classification keys are prefixed "text:" or "quality:"
        for key in data.get("分类分布", {}):
            if key.startswith("text:"):
                text_labels.add(key.split("text:")[-1].strip())
            elif key.startswith("quality:"):
                quality_labels.add(key.split("quality:")[-1].strip())

        seg_keys |= set(data.get("分割类别分布", {}))
        loc_keys |= set(data.get("定位类别分布", {}))

    out = {
        "text_labels": sorted(text_labels),
        "quality_labels": sorted(quality_labels),
        "segmentation_keys": sorted(seg_keys),
        "localization_keys": sorted(loc_keys),
        "counts": {
            "text_labels": len(text_labels),
            "quality_labels": len(quality_labels),
            "segmentation_keys": len(seg_keys),
            "localization_keys": len(loc_keys),
        },
    }

    save_path = os.path.join(tmp_dir, "key_analysis.json")
    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(out, f, indent=4, ensure_ascii=False)
    print(f"✅ 保存标注 key 统计 -> {save_path}")
    return out

def analyze_multimodal_relations(tar_data_dir):
    """Advanced statistics over the processed datasets.

    - Maps each classification "text" label to Chinese disease name(s)
      via configs/diseases_discription.json; a label that maps to more
      than one name is counted as a multi-disease sample.
    - Counts multimodal samples (classification combined with
      segmentation and/or localization).
    - Reports the 5 most frequent diseases plus an "others" bucket.
    - Tallies which segmentation/localization keys co-occur with
      classification in multimodal samples.

    Writes the result to tmp_dir/multimodal.json and returns it.

    Raises:
        ValueError: if a classification text label has no entry in the
            disease mapping (fails fast so the config can be fixed).
    """
    disease_map = safe_load_json(disease_config)

    multimodal_count = 0
    multi_disease_samples = []
    disease_counter = Counter()
    co_occur_keys = Counter()

    for d in sorted(os.listdir(tar_data_dir)):
        anno_path = os.path.join(tar_data_dir, d, "annotations.json")
        if not os.path.exists(anno_path):
            continue
        annos = safe_load_json(anno_path)

        for img_name, item in annos.items():
            diag = item.get("diagnosis", {})
            has_cls = "classification" in diag
            has_seg = "segmentation" in diag
            has_loc = "localization" in diag

            # === multimodal: classification plus at least one other task ===
            if has_cls and (has_seg or has_loc):
                multimodal_count += 1
                # record which seg/loc keys accompany the classification
                seg_keys = list(diag.get("segmentation", {}).keys())
                loc_keys = list(diag.get("localization", {}).keys())
                for k in seg_keys + loc_keys:
                    co_occur_keys[k] += 1

            # === classification-based disease statistics ===
            if not has_cls:
                continue

            cls = diag.get("classification", {})
            text_val = cls.get("text", "")
            if not isinstance(text_val, str) or not text_val.strip():
                continue  # skip empty or non-string labels

            mapped = disease_map.get(text_val, None)
            if mapped is None:
                # unmapped label: abort loudly so the mapping gets updated
                raise ValueError(f"❌ 存在未映射的标签: '{text_val}' (图片: {img_name}, 数据集: {d})")

            # normalize a single-name mapping to a list
            if isinstance(mapped, str):
                mapped = [mapped]

            # mapping to several Chinese names -> multi-disease sample
            if len(mapped) > 1:
                multi_disease_samples.append(text_val)

            # count once per mapped Chinese disease name
            for zh_name in mapped:
                disease_counter[zh_name] += 1

    # === top-5 most frequent diseases; everything else summed as "others" ===
    most_common = disease_counter.most_common(5)
    other_sum = sum(disease_counter.values()) - sum(c for _, c in most_common)
    top_summary = {k: v for k, v in most_common}
    if other_sum > 0:
        top_summary["others"] = other_sum

    out = {
        "multi_modal_count": multimodal_count,
        "multi_disease_count": len(set(multi_disease_samples)),
        "multi_disease_samples": sorted(list(set(multi_disease_samples))),
        "top5_diseases": top_summary,
        "all_diseases_sorted": dict(sorted(disease_counter.items(), key=lambda x: x[1], reverse=True)),
        "co_occur_keys": dict(sorted(co_occur_keys.items(), key=lambda x: x[1], reverse=True)),
    }

    save_path = os.path.join(tmp_dir, "multimodal.json")
    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(out, f, indent=4, ensure_ascii=False)
    print(f"✅ 多模态与疾病统计 -> {save_path}")
    return out



# ========= script entry point =========
if __name__ == "__main__":
    print("=== 1️⃣ 检查 processed 数据集完整性 ===")
    # Integrity check walks every referenced file and is slow;
    # currently disabled — re-enable when the dataset changes.
    # check_tar_integrity(tar_data_dir)
    
    print("\n=== 2️⃣ 分析 stat 目录 ===")
    analyze_stat_jsons(stat_dir)

    print("\n=== 3️⃣ 提取 key 信息 ===")
    analyze_text_keys(stat_dir)

    print("\n=== 4️⃣ 分析多模态与多疾病情况 ===")
    analyze_multimodal_relations(tar_data_dir)

    print(f"\n🎯 全部分析完成。结果保存在 {tmp_dir} 目录下。")
