# 基础库
import os
import sys
import json
import glob
import re
import time
import random
import datetime
import warnings
import logging
from collections import Counter, defaultdict

# 数据处理和科学计算
import numpy as np
import pandas as pd
import scipy as sp
from scipy import stats

# 可视化
import matplotlib.pyplot as plt
# import matplotlib.patches as patches
import seaborn as sns
from PIL import Image

# 深度学习相关
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.nn.functional import roi_align

# 图像处理
import cv2

# Suppress all warnings so the diagnostic output below stays readable.
warnings.filterwarnings('ignore')

# Global plotting style for every figure produced by this script.
plt.style.use('seaborn-v0_8-whitegrid')
sns.set(style="whitegrid", palette="muted", color_codes=True)

def check_dataset_structure(data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/'):
    """Validate the COCO-style layout of the dataset.

    For each of the train/val/test splits this checks that the annotation
    JSON exists, contains the required top-level fields, defines a
    contiguous set of category ids, only references defined category ids,
    and that a sample of the referenced image files exists on disk.
    Problems are printed; nothing is raised.

    Args:
        data_root: Dataset root directory containing ``annotations/`` and
            the ``<split>2017`` image folders.

    Returns:
        str: A fixed completion message (findings are reported via stdout).
    """
    # Annotation files follow the standard COCO naming scheme.
    ann_files = [
        (split, os.path.join(data_root, f'annotations/instances_{split}2017.json'))
        for split in ('train', 'val', 'test')
    ]

    for split_name, ann_file in ann_files:
        if not os.path.exists(ann_file):
            print(f"错误: {split_name}集标注文件不存在: {ann_file}")
            continue

        try:
            with open(ann_file, 'r') as f:
                ann_data = json.load(f)

            # Required top-level COCO fields.
            for field in ('images', 'annotations', 'categories'):
                if field not in ann_data:
                    print(f"错误: {split_name}集标注文件缺少'{field}'字段")

            # Category definitions.
            categories = ann_data.get('categories', [])
            category_ids = [cat['id'] for cat in categories]
            print(f"\n{split_name}集类别信息:")
            print(f"类别总数: {len(categories)}")
            print(f"类别ID列表: {category_ids}")

            # Category ids should form a contiguous range.
            if category_ids:
                min_id, max_id = min(category_ids), max(category_ids)
                missing_ids = set(range(min_id, max_id + 1)) - set(category_ids)
                if missing_ids:
                    print(f"警告: 类别ID不连续，缺少ID: {missing_ids}")

            # Category ids actually referenced by annotations.
            if 'annotations' in ann_data:
                used_category_ids = [ann['category_id'] for ann in ann_data['annotations']]
                used_id_counter = Counter(used_category_ids)
                print(f"标注中使用的类别ID统计: {dict(used_id_counter)}")

                # Annotations must only use defined category ids.
                undefined_ids = set(used_category_ids) - set(category_ids)
                if undefined_ids:
                    print(f"错误: 标注中使用了未定义的类别ID: {undefined_ids}")

            # Spot-check that image files exist (first 10 only, to stay fast).
            if 'images' in ann_data:
                image_dir = os.path.join(data_root, f"{split_name}2017")
                missing_images = [
                    img_info['file_name']
                    for img_info in ann_data['images'][:10]
                    if not os.path.exists(os.path.join(image_dir, img_info['file_name']))
                ]
                if missing_images:
                    print(f"警告: 部分图像文件不存在，例如: {missing_images[:5]}")

        except json.JSONDecodeError:
            print(f"错误: {split_name}集标注文件不是有效的JSON格式")
        except Exception as e:
            print(f"检查{split_name}集时发生错误: {str(e)}")

    return "数据集结构检查完成"


# Run the dataset structure check immediately when the script is executed.
check_dataset_structure()


def check_class_consistency(
        data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/',
        model_num_classes=10):
    """Compare the model's configured class count with the training set.

    Warns when the configured ``num_classes`` disagrees with the number of
    categories defined in the training annotations, and when category ids
    do not start at 0 (DiffusionDet expects 0-based ids).

    Args:
        data_root: Dataset root directory.
        model_num_classes: ``num_classes`` from the model config file.

    Returns:
        str: A fixed completion message (findings are printed).
    """
    train_ann_file = os.path.join(data_root, 'annotations/instances_train2017.json')

    try:
        with open(train_ann_file, 'r') as f:
            ann_data = json.load(f)

        # Number of categories actually defined in the dataset.
        dataset_categories = ann_data.get('categories', [])
        dataset_num_classes = len(dataset_categories)

        print(f"模型配置的类别数量: {model_num_classes}")
        print(f"数据集中的类别数量: {dataset_num_classes}")

        if model_num_classes != dataset_num_classes:
            print(f"警告: 模型配置的类别数量({model_num_classes})与数据集类别数量({dataset_num_classes})不一致!")

            # Too many dataset classes -> possible index-out-of-range errors;
            # too few -> some model classes receive no training data.
            if dataset_num_classes > model_num_classes:
                print("这可能导致类别索引超出范围错误，因为模型无法处理超出其配置范围的类别")
            else:
                print("模型配置的类别数量大于数据集类别数量，这可能导致部分类别没有训练数据")

        # Category ids should be 0-based for DiffusionDet.
        category_ids = [cat['id'] for cat in dataset_categories]
        min_category_id = min(category_ids) if category_ids else None

        if min_category_id is not None and min_category_id != 0:
            print(f"警告: 类别ID不是从0开始的，最小类别ID为{min_category_id}")
            print("DiffusionDet模型通常期望类别ID从0开始，这可能导致类别映射问题")

            # Show the id -> name mapping to help diagnose remapping issues.
            id_to_name = {cat['id']: cat['name'] for cat in dataset_categories}
            print("类别ID到名称的映射:")
            for cat_id, cat_name in id_to_name.items():
                print(f"  ID {cat_id}: {cat_name}")

    except Exception as e:
        print(f"检查类别一致性时发生错误: {str(e)}")

    return "类别一致性检查完成"


# Run the class-count consistency check at import time.
check_class_consistency()


def check_evaluator_mapping(
        data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/',
        model_num_classes=10):
    """Check how model output indices map onto the evaluator's category ids.

    Builds an mmdet ``CocoMetric`` on the validation annotations and
    verifies that every model output index (0 .. num_classes-1) maps onto a
    loaded COCO category id.

    Args:
        data_root: Dataset root directory.
        model_num_classes: ``num_classes`` from the model config file.

    Returns:
        str: A fixed completion message (findings are printed).
    """
    ann_file = os.path.join(data_root, 'annotations/instances_val2017.json')

    try:
        # Imported lazily inside the try-block so a missing mmdet install is
        # reported instead of crashing the whole script (the original import
        # at the top of the function raised an uncaught ImportError).
        from mmdet.evaluation.metrics import CocoMetric

        # Build the evaluator the same way the test pipeline does.
        evaluator = CocoMetric(ann_file=ann_file, metric='bbox')

        # Forces the evaluator to load class info from the annotation file.
        evaluator.dataset_meta = {'classes': None}
        evaluator.cat_ids = evaluator._coco_api.getCatIds()

        print("评估器加载的类别ID:")
        print(evaluator.cat_ids)

        # Loaded ids should form a contiguous range.
        if len(evaluator.cat_ids) > 0:
            min_id, max_id = min(evaluator.cat_ids), max(evaluator.cat_ids)
            missing_ids = set(range(min_id, max_id + 1)) - set(evaluator.cat_ids)
            if missing_ids:
                print(f"警告: 评估器加载的类别ID不连续，缺少ID: {missing_ids}")

        # The model outputs indices 0 .. num_classes-1; each must map to an id.
        model_max_class_idx = model_num_classes - 1

        print("\n检查模型输出类别索引到评估器类别ID的映射:")
        for model_idx in range(model_max_class_idx + 1):
            if model_idx < len(evaluator.cat_ids):
                coco_cat_id = evaluator.cat_ids[model_idx]
                print(f"模型输出索引 {model_idx} -> COCO类别ID {coco_cat_id}")
            else:
                print(f"错误: 模型输出索引 {model_idx} 超出评估器类别ID列表范围!")

    except ImportError:
        print("无法导入CocoMetric，请确保mmdet已正确安装")
    except Exception as e:
        print(f"检查评估器映射时发生错误: {str(e)}")

    return "评估器类别映射检查完成"


# Run the evaluator mapping check at import time.
check_evaluator_mapping()


def check_custom_evaluator():
    """Inspect the project's DensityCocoMetric for an index-range guard.

    Loads the source of ``DensityCocoMetric.results2json`` and reports
    whether it bounds predicted labels by ``len(self.cat_ids)``; prints a
    suggested patch when the guard is missing.

    Returns:
        str: A fixed completion message (findings are printed).
    """
    import inspect  # needed to dump the method source (the original also imported sys, unused)

    try:
        from projects.DiffusionDet.diffusiondet.evaluation import DensityCocoMetric

        # Dump the method source so the guard can be checked textually.
        results2json_code = inspect.getsource(DensityCocoMetric.results2json)
        print("DensityCocoMetric.results2json方法的代码:")
        print(results2json_code)

        # A plain substring match is enough to detect the range guard.
        if "if label < len(self.cat_ids)" in results2json_code:
            print("\n✅ 评估器包含类别索引范围检查，这有助于防止索引错误")
        else:
            print("\n⚠️ 警告: 评估器可能缺少类别索引范围检查，这可能导致索引错误")
            print("建议添加如下代码到results2json方法中:")
            print("""
            for label in range(len(result)):
                if label < len(self.cat_ids):
                    data['category_id'] = self.cat_ids[label]
                else:
                    # 使用最后一个有效类别ID
                    data['category_id'] = self.cat_ids[-1]
                    print(f"警告: 预测的标签 {label} 超出了类别ID范围")
            """)

    except ImportError:
        print("无法导入DensityCocoMetric评估器，请确保相关代码已正确安装")
    except Exception as e:
        print(f"检查自定义评估器时发生错误: {str(e)}")

    return "自定义评估器检查完成"


# Run the custom-evaluator inspection at import time.
check_custom_evaluator()


def check_dataset_sampling(
        data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/'):
    """Compare category distributions before and after index subsampling.

    Simulates the training config's "take every 30th image" sampling and
    reports categories that vanish after sampling or whose share shifts by
    more than five percentage points.

    Args:
        data_root: Dataset root directory.

    Returns:
        str: A fixed completion message (findings are printed).
    """
    train_ann_file = os.path.join(data_root, 'annotations/instances_train2017.json')

    # Mirrors the training config: one sample every 30 indices.
    train_indices = list(range(0, 10000, 30))

    try:
        with open(train_ann_file, 'r') as f:
            ann_data = json.load(f)

        all_images = ann_data.get('images', [])
        all_annotations = ann_data.get('annotations', [])

        # Out-of-range sampling indices are silently dropped below, but warn
        # about the mismatch explicitly.
        if len(all_images) <= max(train_indices):
            print(f"警告: 采样索引({max(train_indices)})超出了数据集图像总数({len(all_images)})")

        # Group annotations by image id.
        img_id_to_anns = defaultdict(list)
        for ann in all_annotations:
            img_id_to_anns[ann['image_id']].append(ann)

        # Full-dataset category distribution.
        all_categories = [
            ann['category_id']
            for img in all_images
            for ann in img_id_to_anns.get(img['id'], [])
        ]
        all_category_dist = Counter(all_categories)

        # Category distribution after subsampling.
        sampled_images = [all_images[i] for i in train_indices if i < len(all_images)]
        sampled_categories = [
            ann['category_id']
            for img in sampled_images
            for ann in img_id_to_anns.get(img['id'], [])
        ]
        sampled_category_dist = Counter(sampled_categories)

        # Totals are loop-invariant: compute them once instead of per print.
        all_total = sum(all_category_dist.values())
        sampled_total = sum(sampled_category_dist.values())

        print("\n原始数据集类别分布:")
        for cat_id, count in sorted(all_category_dist.items()):
            print(f"类别ID {cat_id}: {count}个实例 ({count / all_total * 100:.2f}%)")

        print("\n采样后的类别分布:")
        for cat_id, count in sorted(sampled_category_dist.items()):
            original_count = all_category_dist.get(cat_id, 0)
            print(
                f"类别ID {cat_id}: {count}个实例 ({count / sampled_total * 100:.2f}%), 原始: {original_count}个")

        # Categories that disappear entirely after sampling.
        missing_categories = set(all_category_dist.keys()) - set(sampled_category_dist.keys())
        if missing_categories:
            print(f"\n⚠️ 警告: 以下类别在采样后消失: {missing_categories}")

        # Percentage-point shift of each category's share.
        distribution_changes = {}
        for cat_id in set(all_category_dist.keys()) | set(sampled_category_dist.keys()):
            original_ratio = all_category_dist.get(cat_id, 0) / all_total if all_total > 0 else 0
            sampled_ratio = sampled_category_dist.get(cat_id, 0) / sampled_total if sampled_total > 0 else 0
            distribution_changes[cat_id] = (sampled_ratio - original_ratio) * 100

        # Flag shifts larger than five percentage points.
        significant_changes = {k: v for k, v in distribution_changes.items() if abs(v) > 5}
        if significant_changes:
            print("\n⚠️ 警告: 以下类别的分布比例变化显著:")
            for cat_id, change in sorted(significant_changes.items(), key=lambda x: abs(x[1]), reverse=True):
                print(f"类别ID {cat_id}: {'增加' if change > 0 else '减少'} {abs(change):.2f}%")

    except Exception as e:
        print(f"检查数据集采样时发生错误: {str(e)}")

    return "数据集采样检查完成"


# Run the sampling distribution check at import time.
check_dataset_sampling()


def visualize_dataset_samples(
        data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/'):
    """Show up to five random annotated validation images with their boxes.

    Each selected image is drawn in its own subplot with red rectangles and
    category labels for every annotation.

    Args:
        data_root: Dataset root directory.

    Returns:
        str: A fixed completion message (the function's output is the plot).
    """
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    from PIL import Image
    import numpy as np
    import random

    val_ann_file = os.path.join(data_root, 'annotations/instances_val2017.json')

    try:
        with open(val_ann_file, 'r') as f:
            ann_data = json.load(f)

        # Index images by id for direct lookup.
        all_images = {img['id']: img for img in ann_data.get('images', [])}

        # Group annotations by image id.
        img_to_anns = {}
        for ann in ann_data.get('annotations', []):
            img_to_anns.setdefault(ann['image_id'], []).append(ann)

        categories = {cat['id']: cat for cat in ann_data.get('categories', [])}

        # Pick up to five images that actually have annotations.
        img_ids_with_anns = list(img_to_anns.keys())
        if not img_ids_with_anns:
            print("没有找到带标注的图像")
            # Bug fix: the original returned None here, unlike every other
            # exit path of this function.
            return "数据集样本可视化完成"

        selected_img_ids = random.sample(img_ids_with_anns, min(5, len(img_ids_with_anns)))

        fig, axes = plt.subplots(len(selected_img_ids), 1, figsize=(10, 5 * len(selected_img_ids)))
        if len(selected_img_ids) == 1:
            axes = [axes]  # subplots() returns a bare Axes for a single row

        for i, img_id in enumerate(selected_img_ids):
            img_info = all_images[img_id]
            img_file = os.path.join(data_root, 'val2017', img_info['file_name'])

            try:
                img = Image.open(img_file)
                axes[i].imshow(np.array(img))
                axes[i].set_title(f"图像ID: {img_id}, 文件名: {img_info['file_name']}")

                # Draw every annotation box on the image.
                for ann in img_to_anns[img_id]:
                    bbox = ann['bbox']  # COCO format: [x, y, width, height]
                    category_id = ann['category_id']

                    # Fall back to a placeholder name for undefined ids.
                    category_name = categories[category_id][
                        'name'] if category_id in categories else f"未知类别({category_id})"

                    rect = patches.Rectangle(
                        (bbox[0], bbox[1]), bbox[2], bbox[3],
                        linewidth=2, edgecolor='r', facecolor='none'
                    )
                    axes[i].add_patch(rect)

                    # Label the box with the category name and id.
                    axes[i].text(
                        bbox[0], bbox[1] - 5,
                        f"{category_name} (ID:{category_id})",
                        color='white', fontsize=9,
                        bbox=dict(facecolor='red', alpha=0.5)
                    )

            except Exception as e:
                # Keep going: render the error inside the failed subplot.
                axes[i].text(0.5, 0.5, f"加载图像时出错: {str(e)}",
                             ha='center', va='center', fontsize=12)

        plt.tight_layout()
        plt.show()

    except Exception as e:
        print(f"可视化数据集样本时发生错误: {str(e)}")

    return "数据集样本可视化完成"


# Render sample visualizations at import time.
visualize_dataset_samples()


def generate_summary_report(
        data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/'):
    """Aggregate all dataset checks into a single printed report.

    Collects per-split category statistics, potential problems (missing
    files, non-contiguous / non-zero-based / out-of-range category ids,
    undefined ids in annotations), and derived fix suggestions.

    Args:
        data_root: Dataset root directory.

    Returns:
        dict: The report, including the "潜在问题" (problems) and
        "解决方案建议" (suggested fixes) lists.
    """
    report = {
        "数据集路径": data_root,
        "模型配置": {
            "num_classes": 10,
            "使用ADEM": False,
            "使用LCM": True,
            "使用DDIM": False
        },
        "数据集采样": {
            "训练集": "每隔30个样本取1个",
            "验证集": "每隔10个样本取1个"
        },
        "潜在问题": []
    }

    # COCO-style annotation files for the three splits.
    split_files = [
        ('训练集', os.path.join(data_root, 'annotations/instances_train2017.json')),
        ('验证集', os.path.join(data_root, 'annotations/instances_val2017.json')),
        ('测试集', os.path.join(data_root, 'annotations/instances_test2017.json')),
    ]

    for split_name, ann_file in split_files:
        if not os.path.exists(ann_file):
            report["潜在问题"].append(f"{split_name}标注文件不存在: {ann_file}")
            continue

        try:
            with open(ann_file, 'r') as f:
                ann_data = json.load(f)

            categories = ann_data.get('categories', [])
            category_ids = [cat['id'] for cat in categories]

            report[f"{split_name}类别数量"] = len(categories)
            report[f"{split_name}类别ID"] = category_ids

            if category_ids:
                min_id, max_id = min(category_ids), max(category_ids)

                # Ids should form a contiguous range...
                missing_ids = set(range(min_id, max_id + 1)) - set(category_ids)
                if missing_ids:
                    report["潜在问题"].append(f"{split_name}类别ID不连续，缺少ID: {missing_ids}")

                # ...start at 0...
                if min_id != 0:
                    report["潜在问题"].append(f"{split_name}类别ID不是从0开始的，最小ID为{min_id}")

                # ...and fit inside the configured num_classes.
                # Bug fix: the original called max(category_ids) without this
                # guard, raising ValueError on an empty category list.
                if max_id >= report["模型配置"]["num_classes"]:
                    report["潜在问题"].append(
                        f"{split_name}最大类别ID({max_id})大于等于模型配置的num_classes({report['模型配置']['num_classes']})")

            # Annotations must only reference defined category ids.
            if 'annotations' in ann_data:
                used_category_ids = set(ann['category_id'] for ann in ann_data['annotations'])
                undefined_ids = used_category_ids - set(category_ids)
                if undefined_ids:
                    report["潜在问题"].append(f"{split_name}标注中使用了未定义的类别ID: {undefined_ids}")

        except Exception as e:
            report["潜在问题"].append(f"检查{split_name}时发生错误: {str(e)}")

    # Model config vs. dataset consistency.
    if "训练集类别数量" in report and report["训练集类别数量"] != report["模型配置"]["num_classes"]:
        report["潜在问题"].append(
            f"模型配置的类别数量({report['模型配置']['num_classes']})与训练集类别数量({report['训练集类别数量']})不一致")

    # Derive fix suggestions from the collected problems.
    report["解决方案建议"] = []

    if any("类别ID不是从0开始" in problem for problem in report["潜在问题"]):
        report["解决方案建议"].append("修改数据集标注文件，确保类别ID从0开始，或者在评估器中添加类别ID映射")

    if any("最大类别ID" in problem and "大于等于模型配置的num_classes" in problem for problem in report["潜在问题"]):
        report["解决方案建议"].append("增加模型配置中的num_classes值，使其大于数据集中的最大类别ID")

    if any("类别ID不连续" in problem for problem in report["潜在问题"]):
        report["解决方案建议"].append("在评估器中添加类别索引范围检查，确保预测的类别索引在有效范围内")

    # Print the human-readable report.
    print("\n===== 数据集检查报告 =====\n")
    for key, value in report.items():
        if key != "潜在问题" and key != "解决方案建议":
            print(f"{key}: {value}")

    print("\n----- 潜在问题 -----")
    if report["潜在问题"]:
        for i, problem in enumerate(report["潜在问题"], 1):
            print(f"{i}. {problem}")
    else:
        print("未发现明显问题")

    print("\n----- 解决方案建议 -----")
    if report["解决方案建议"]:
        for i, suggestion in enumerate(report["解决方案建议"], 1):
            print(f"{i}. {suggestion}")
    else:
        print("无需特别修改")

    return report


# Print the aggregated report at import time (return value is discarded).
generate_summary_report()


# 2. 修改模型推理函数，添加类别索引安全检查
# def fix_inference_function():
#     return """
#     # 在projects/DiffusionDet/diffusiondet/models/diffusion_det.py中修改
#
#     def inference(self, box_cls, box_pred, cfg, device):
#         """
#     Arguments:
#     box_cls: tensor
#     of
#     shape(batch_size, num_boxes, K)
#     box_pred: tensor
#     of
#     shape(batch_size, num_boxes, 4)
#
#
# """
# assert len(box_cls) == len(box_pred)
#
# results = []
# for i, (scores_per_image, box_pred_per_image) in enumerate(zip(
#         box_cls, box_pred)):
#
#     result = self.inference_single_image(
#         scores_per_image, box_pred_per_image, cfg, device
#     )
#
#     # 添加安全检查，确保类别索引在有效范围内
#     if hasattr(result, 'labels') and hasattr(self, 'num_classes'):
#         result.labels = torch.clamp(result.labels, max=self.num_classes-1)
#
#     results.append(result)
#
# return results
# """


# 3. 修改ADEM模块中的密度分数计算
def fix_adem_module():
    """Return the suggested patch for ADEM's density-score extraction.

    Bug fix: the original wrapped the snippet in double triple-quotes while
    the snippet itself contained a double-triple-quoted docstring, so the
    string literal terminated early — the function returned a truncated
    snippet and the rest of the body became stray unreachable statements.
    Using single triple-quotes for the outer literal keeps the snippet
    intact.

    Returns:
        str: A code snippet to apply in
        ``projects/DiffusionDet/diffusiondet/models/adem.py``.
    """
    return '''
    # 在projects/DiffusionDet/diffusiondet/models/adem.py中修改

    def _extract_box_density(self, density_maps, boxes, img_shape, device):
        """
        从密度图中提取边界框对应的密度分数
        """
        h, w = img_shape[:2]
        density_scores = []

        # 使用多层特征图的密度预测
        for level, density_map in enumerate(density_maps):
            # 调整密度图大小以匹配原图尺寸
            density = F.interpolate(
                density_map,
                size=(h, w),
                mode='bilinear',
                align_corners=False
            )

            # 归一化边界框坐标
            boxes_normalized = boxes.clone()
            boxes_normalized[:, [0, 2]] = boxes_normalized[:, [0, 2]] / w
            boxes_normalized[:, [1, 3]] = boxes_normalized[:, [1, 3]] / h

            # 使用RoIAlign提取边界框区域的密度特征
            box_densities = roi_align(
                density,
                [boxes_normalized],
                output_size=7,  # RoI池化输出大小
                spatial_scale=1.0,
                sampling_ratio=-1
            )

            # 计算每个框的平均密度
            box_avg_density = box_densities.mean(dim=[1, 2, 3])

            # 添加安全检查，处理无效值
            box_avg_density = torch.nan_to_num(box_avg_density, nan=0.0, posinf=1.0, neginf=0.0)

            density_scores.append(box_avg_density)

        # 合并多层密度分数
        if len(density_scores) > 1:
            final_density = torch.stack(density_scores).mean(dim=0)
        else:
            final_density = density_scores[0]

        # 限制密度分数范围，避免极端值
        final_density = torch.clamp(final_density, min=0.0, max=5.0)

        return final_density
    '''


# 4. 修改ADEM推理函数
def fix_adem_inference():
    """Return the suggested patch for DiffusionDet's ADEM inference path.

    The snippet clamps the density-based logit enhancement so extreme
    density scores cannot destabilise the final class predictions.

    Returns:
        str: A code snippet to apply in
        ``projects/DiffusionDet/diffusiondet/models/diffusion_det.py``.
    """
    return """
    # 在projects/DiffusionDet/diffusiondet/models/diffusion_det.py中修改

    def _adem_inference(self, x, init_bboxes, image_size, device, img_metas, cfg):
        # ... 原有代码 ...

        # 使用密度图增强预测结果
        enhanced_logits = []
        for i in range(batch_size):
            cur_logits = pred_logits[-1][i]
            cur_bboxes = pred_bboxes[-1][i]

            # 从密度图中提取边界框对应的密度分数
            density_scores = self._extract_box_density(
                density_maps,
                cur_bboxes,
                img_metas[i]['img_shape'],
                device
            )

            # 使用密度分数增强类别预测分数
            density_weight = density_scores.unsqueeze(1).expand_as(cur_logits)

            # 添加安全检查，限制增强幅度
            enhancement = torch.clamp(
                self.adem_loss_weight * torch.log1p(density_weight),
                min=-5.0, max=5.0
            )

            enhanced_cur_logits = cur_logits + enhancement
            enhanced_logits.append(enhanced_cur_logits)

        # 使用增强后的logits进行最终预测
        enhanced_pred_logits = torch.stack(enhanced_logits)
        inference_result = self.inference(enhanced_pred_logits, pred_bboxes[-1], cfg, device)

        return inference_result
    """


def check_category_mapping(
        data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/',
        num_classes=10):
    """Trace how model prediction indices map to dataset category ids.

    Prints the index -> (id, name) mapping for the validation annotations
    and suggests fixes when ids are non-contiguous, not 0-based, or when
    the category count disagrees with ``num_classes``.

    Args:
        data_root: Dataset root directory.
        num_classes: ``num_classes`` from the model config file.

    Returns:
        str: A fixed completion message (findings are printed).
    """
    val_ann_file = os.path.join(data_root, 'annotations/instances_val2017.json')

    try:
        with open(val_ann_file, 'r') as f:
            ann_data = json.load(f)

        # Category definitions from the validation split.
        categories = ann_data.get('categories', [])
        category_ids = [cat['id'] for cat in categories]
        category_names = [cat['name'] for cat in categories]

        print("\n===== 类别ID与索引映射检查 =====\n")
        print(f"数据集中定义的类别数量: {len(categories)}")
        print(f"模型配置的类别数量: {num_classes}")

        print("\n类别信息:")
        for i, (cat_id, cat_name) in enumerate(zip(category_ids, category_names)):
            print(f"索引 {i}: ID = {cat_id}, 名称 = {cat_name}")

        # Ids should form a contiguous range.
        if len(category_ids) > 0:
            min_id, max_id = min(category_ids), max(category_ids)
            missing_ids = set(range(min_id, max_id + 1)) - set(category_ids)
            if missing_ids:
                print(f"\n⚠️ 警告: 类别ID不连续，缺少ID: {missing_ids}")

        # DiffusionDet expects 0-based category ids.
        if category_ids and min(category_ids) != 0:
            print(f"\n⚠️ 警告: 类别ID不是从0开始的，最小ID为{min(category_ids)}")
            print("这可能导致模型预测与数据集类别之间的映射错误")

            # Simulate how prediction indices would map to dataset classes.
            print("\n模型预测索引到数据集类别的映射:")
            for pred_idx in range(num_classes):
                if pred_idx < len(category_ids):
                    dataset_cat_id = category_ids[pred_idx]
                    dataset_cat_name = category_names[pred_idx]
                    print(f"模型预测索引 {pred_idx} -> 数据集类别ID {dataset_cat_id} ('{dataset_cat_name}')")
                else:
                    print(f"模型预测索引 {pred_idx} -> 超出数据集类别范围!")

        # The mapping the evaluator will apply at test time.
        print("\n评估时的类别映射:")
        for model_idx in range(num_classes):
            if model_idx < len(category_ids):
                coco_cat_id = category_ids[model_idx]
                coco_cat_name = category_names[model_idx]
                print(f"模型输出索引 {model_idx} -> COCO类别ID {coco_cat_id} ('{coco_cat_name}')")
            else:
                print(f"⚠️ 模型输出索引 {model_idx} 超出评估器类别ID列表范围!")

        print("\n解决方案建议:")
        # Bug fix: guard against an empty id list before calling min() —
        # the original raised ValueError here when no categories existed.
        if category_ids and min(category_ids) != 0:
            print("1. 修改数据集标注文件，将类别ID从0开始重新编号")
            print("2. 在评估器中添加类别索引映射，确保模型预测的索引正确映射到数据集类别ID")

        if len(category_ids) < num_classes:
            print(f"3. 将模型配置中的num_classes从{num_classes}减少到{len(category_ids)}")
        elif len(category_ids) > num_classes:
            print(f"4. 将模型配置中的num_classes从{num_classes}增加到{len(category_ids)}")

    except Exception as e:
        print(f"检查类别映射时发生错误: {str(e)}")

    return "类别映射检查完成"


# Run the category mapping check at import time.
check_category_mapping()


def check_dataset_quality(
        data_root='/media/ross/8TB/project/lsh/dataset/microAlgea/microAlgaeOri/'):
    """Report annotation statistics and quality issues for the training set.

    Covers per-image annotation counts, invalid or degenerate bounding
    boxes, box area / aspect-ratio distributions, and class imbalance.

    Args:
        data_root: Dataset root directory.

    Returns:
        str: A fixed completion message (findings are printed).
    """
    train_ann_file = os.path.join(data_root, 'annotations/instances_train2017.json')

    try:
        with open(train_ann_file, 'r') as f:
            ann_data = json.load(f)

        images = ann_data.get('images', [])
        annotations = ann_data.get('annotations', [])
        categories = {cat['id']: cat['name'] for cat in ann_data.get('categories', [])}

        print("\n===== 数据集质量检查 =====\n")
        print(f"图像总数: {len(images)}")
        print(f"标注总数: {len(annotations)}")
        print(f"类别总数: {len(categories)}")

        # Group annotations by image id.
        img_to_anns = defaultdict(list)
        for ann in annotations:
            img_to_anns[ann['image_id']].append(ann)

        # Number of annotations per annotated image.
        ann_counts = [len(anns) for anns in img_to_anns.values()]

        if ann_counts:
            print("\n标注数量统计:")
            print(f"平均每张图像的标注数量: {np.mean(ann_counts):.2f}")
            print(f"最小标注数量: {min(ann_counts)}")
            print(f"最大标注数量: {max(ann_counts)}")
            print(f"标注数量中位数: {np.median(ann_counts)}")

            # Bug fix: the original divided by len(images) unconditionally,
            # which raised ZeroDivisionError on an empty image list.
            if images:
                images_without_anns = len(images) - len(img_to_anns)
                print(f"没有标注的图像数量: {images_without_anns} ({images_without_anns / len(images) * 100:.2f}%)")

        # Bounding-box sanity checks.
        bbox_areas = []
        bbox_aspect_ratios = []
        bbox_issues = []

        for ann in annotations:
            if 'bbox' in ann:
                bbox = ann['bbox']  # COCO format: [x, y, width, height]

                # Malformed box.
                if len(bbox) != 4:
                    bbox_issues.append(f"标注ID {ann['id']}: 边界框格式错误")
                    continue

                # Degenerate box.
                if bbox[2] <= 0 or bbox[3] <= 0:
                    bbox_issues.append(f"标注ID {ann['id']}: 边界框宽度或高度小于等于0")
                    continue

                area = bbox[2] * bbox[3]
                aspect_ratio = bbox[2] / bbox[3]

                bbox_areas.append(area)
                bbox_aspect_ratios.append(aspect_ratio)

                # Tiny boxes are usually annotation noise.
                if area < 10:
                    bbox_issues.append(f"标注ID {ann['id']}: 边界框面积过小 ({area:.2f}像素)")

                # Extreme aspect ratios are suspicious.
                if aspect_ratio > 10 or aspect_ratio < 0.1:
                    bbox_issues.append(f"标注ID {ann['id']}: 边界框宽高比异常 ({aspect_ratio:.2f})")

        if bbox_areas:
            print("\n边界框统计:")
            print(f"平均面积: {np.mean(bbox_areas):.2f}像素")
            print(f"最小面积: {min(bbox_areas):.2f}像素")
            print(f"最大面积: {max(bbox_areas):.2f}像素")
            print(f"平均宽高比: {np.mean(bbox_aspect_ratios):.2f}")

            # Box-area distribution at standard percentiles.
            percentiles = (10, 25, 50, 75, 90)
            area_percentiles = np.percentile(bbox_areas, percentiles)
            print("\n边界框面积分布:")
            for pct, value in zip(percentiles, area_percentiles):
                print(f"{pct}%分位: {value:.2f}像素")

        # Report at most ten box problems to keep the output short.
        if bbox_issues:
            print("\n发现边界框问题:")
            for i, issue in enumerate(bbox_issues[:10], 1):
                print(f"{i}. {issue}")

            if len(bbox_issues) > 10:
                print(f"... 还有 {len(bbox_issues) - 10} 个问题未显示")

        # Instance count per category.
        category_counts = defaultdict(int)
        for ann in annotations:
            category_id = ann.get('category_id')
            if category_id is not None:
                category_counts[category_id] += 1

        print("\n类别分布:")
        for cat_id, count in sorted(category_counts.items(), key=lambda x: x[1], reverse=True):
            cat_name = categories.get(cat_id, f"未知类别({cat_id})")
            print(f"{cat_name} (ID:{cat_id}): {count}个实例 ({count / len(annotations) * 100:.2f}%)")

        # Imbalance ratio between the most and least frequent category.
        if category_counts:
            max_count = max(category_counts.values())
            min_count = min(category_counts.values())
            imbalance_ratio = max_count / min_count if min_count > 0 else float('inf')

            if imbalance_ratio > 10:
                print(f"\n⚠️ 警告: 类别严重不平衡，最多类别与最少类别的比例为 {imbalance_ratio:.2f}")
                print("这可能导致模型偏向于样本较多的类别，建议考虑数据增强或采样策略")

    except Exception as e:
        print(f"检查数据集质量时发生错误: {str(e)}")

    return "数据集质量检查完成"


# Run the dataset quality check at import time.
check_dataset_quality()



