import numpy as np
import os
import json
import matplotlib.pyplot as plt
from cleanlab.filter import find_label_issues

def analyze_labels(path2prob, path2label, output_dir="outputs"):
    """
    Assess per-sample label quality from the self-label confidence and write
    summary statistics plus a histogram.

    Parameters
    ----------
    path2prob : dict[str, list[float]]
        Image path -> predicted probability distribution over classes
        (ordered consistently with the training classes).
    path2label : dict[str, int]
        Image path -> integer label for that image.
    output_dir : str
        Output directory (analysis.json, path2quality.json and the histogram
        image are written here).

    Notes
    -----
    - The quality score is only the self-label confidence:
      conf_i = pred_probs[i][label_i].
    - Before calling cleanlab, every non-self-label probability is zeroed out,
      so the "suspected label issue" ranking is driven entirely by the
      self-label confidence (lower = more suspicious).
    """
    os.makedirs(output_dir, exist_ok=True)

    # Align ordering: labels and probabilities follow the iteration order of
    # path2prob's keys.
    image_paths = list(path2prob.keys())

    # Guard: with no samples, the fancy indexing and cleanlab call below would
    # raise. Still emit the JSON artifacts so downstream consumers find them.
    if not image_paths:
        analysis = {
            "num_samples": 0,
            "mean_confidence": 0.0,
            "median_confidence": 0.0,
            "confidence>0.9_count": 0,
            "suspected_label_issue_count": 0,
            "suspected_label_issue_image_paths": [],
            "note": "confidence 为本标签置信度；cleanlab 输入已将非本标签概率置零。",
        }
        with open(os.path.join(output_dir, "analysis.json"), "w", encoding="utf-8") as f:
            json.dump(analysis, f, ensure_ascii=False, indent=2)
        with open(os.path.join(output_dir, "path2quality.json"), "w", encoding="utf-8") as f:
            json.dump({}, f, ensure_ascii=False, indent=2)
        print(f"🔍 标签质量分析已保存至 {os.path.join(output_dir, 'analysis.json')}")
        print(f"🔍 详细 path2quality 已保存至 {os.path.join(output_dir, 'path2quality.json')}")
        print(f"\n样本总数: 0")
        return

    labels = np.array([int(path2label[p]) for p in image_paths], dtype=int)
    pred_probs = np.array([path2prob[p] for p in image_paths], dtype=float)  # shape = (N, C)

    # Keep only the self-label confidence.
    idx = np.arange(len(labels))
    self_confidences = pred_probs[idx, labels]  # shape = (N,)

    # Probability matrix containing only the self-label score (all other
    # classes forced to 0) so cleanlab's ranking depends solely on conf_i.
    pred_probs_masked = np.zeros_like(pred_probs)
    pred_probs_masked[idx, labels] = self_confidences

    # cleanlab returns sample indices ordered by self_confidence
    # (lower confidence = more suspicious).
    issues = find_label_issues(
        labels=labels,
        pred_probs=pred_probs_masked,
        return_indices_ranked_by="self_confidence",
    )
    # Build a set once for O(1) membership tests; `i in issues` on a numpy
    # array is O(n) per lookup and made the loop below O(n^2).
    issue_set = {int(i) for i in issues}

    # Per-sample quality info (self-label confidence only).
    path2quality = {}
    for i, p in enumerate(image_paths):
        path2quality[p] = {
            "label": int(labels[i]),
            "confidence": float(self_confidences[i]),  # self-label confidence only
            "suspected_label_issue": (i in issue_set),
        }

    # Global statistics.
    mean_conf = float(np.mean(self_confidences)) if len(self_confidences) else 0.0
    median_conf = float(np.median(self_confidences)) if len(self_confidences) else 0.0
    high_conf_count = int(np.sum(self_confidences > 0.9))
    suspected_issue_count = int(len(issues))
    analysis = {
        "num_samples": int(len(labels)),
        "mean_confidence": mean_conf,
        "median_confidence": median_conf,
        "confidence>0.9_count": high_conf_count,
        "suspected_label_issue_count": suspected_issue_count,
        "suspected_label_issue_image_paths": [image_paths[i] for i in issues],
        "note": "confidence 为本标签置信度；cleanlab 输入已将非本标签概率置零。",
    }

    # Persist JSON artifacts.
    with open(os.path.join(output_dir, "analysis.json"), "w", encoding="utf-8") as f:
        json.dump(analysis, f, ensure_ascii=False, indent=2)
    with open(os.path.join(output_dir, "path2quality.json"), "w", encoding="utf-8") as f:
        json.dump(path2quality, f, ensure_ascii=False, indent=2)

    print(f"🔍 标签质量分析已保存至 {os.path.join(output_dir, 'analysis.json')}")
    print(f"🔍 详细 path2quality 已保存至 {os.path.join(output_dir, 'path2quality.json')}")

    # Histogram of self-label confidences.
    # NOTE(review): the CJK axis labels render correctly only if a CJK-capable
    # font is configured in matplotlib — environment-dependent.
    if len(self_confidences) > 0:
        plt.figure(figsize=(7, 4))
        plt.hist(self_confidences, bins=50)
        plt.xlabel("本标签置信度", fontsize=12)
        plt.ylabel("样本数量", fontsize=12)
        plt.title("本标签置信度分布图", fontsize=14)
        plt.tight_layout()
        fig_path = os.path.join(output_dir, "confidence_dist.png")
        plt.savefig(fig_path)
        plt.close()
        print(f"📈 置信度分布图已保存至 {fig_path}")

    # Brief console summary.
    print(f"\n样本总数: {len(labels)}")
    print(f"平均本标签置信度: {mean_conf:.4f}")
    print(f"高置信度样本数量（>0.9）: {high_conf_count}")
    print(f"疑似标签错误样本数: {suspected_issue_count}")
    if len(issues) > 0:
        print(f"前10个可疑样本 path: {[image_paths[i] for i in issues[:10]]}")
    else:
        print("未检测到可疑标签样本。")