import json
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict, Counter
import cv2
from tqdm import tqdm
from sklearn.cluster import KMeans
from matplotlib.patches import Rectangle
import random

import os
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
import scipy.ndimage
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec  # 新增：用于高级布局控制

# Set global matplotlib font sizes for every figure produced by this module.
import matplotlib as mpl
mpl.rcParams.update({
    'font.size': 14,          # base font size
    'axes.labelsize': 16,     # axis label font size
    'axes.titlesize': 18,     # axes title font size
    'xtick.labelsize': 14,    # x-axis tick label font size
    'ytick.labelsize': 14,    # y-axis tick label font size
    'legend.fontsize': 14,    # legend font size
    'figure.titlesize': 20    # figure title font size
})

def load_coco_data(json_path):
    """Load a COCO-format annotation file and return the parsed dict."""
    with open(json_path, 'r', encoding='utf-8') as f:
        return json.load(f)


def analyze_bbox_distributions(coco_data):
    """Collect per-category bounding-box size statistics.

    Returns a (bbox_data, categories) pair: bbox_data maps category id to
    lists of widths/heights/areas/aspect ratios, categories maps category
    id to category name.
    """
    categories = {entry['id']: entry['name'] for entry in coco_data['categories']}

    bbox_data = defaultdict(lambda: {'widths': [], 'heights': [], 'areas': [], 'aspect_ratios': []})

    for annotation in coco_data['annotations']:
        # COCO bbox layout is [x, y, width, height]; only width/height matter here.
        box_w, box_h = annotation['bbox'][2], annotation['bbox'][3]
        stats = bbox_data[annotation['category_id']]
        stats['widths'].append(box_w)
        stats['heights'].append(box_h)
        stats['areas'].append(box_w * box_h)
        # Guard against zero-height boxes to avoid division by zero.
        stats['aspect_ratios'].append(box_w / box_h if box_h > 0 else 0)

    return bbox_data, categories


def plot_bbox_size_distributions(bbox_data, categories, output_dir):
    """Plot a log-log width-vs-height scatter of bounding boxes by category.

    Saves 'paper_bbox_width_height_scatter.png' into output_dir and returns
    the flat DataFrame (category/width/height/area/aspect_ratio) built from
    bbox_data.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Flatten the per-category lists into one long-format DataFrame.
    # (The original loop carried an unused enumerate index.)
    all_data = [
        {
            'category': categories[cat_id],
            'width': width,
            'height': height,
            'area': area,
            'aspect_ratio': aspect,
        }
        for cat_id, data in bbox_data.items()
        for width, height, area, aspect in zip(
            data['widths'], data['heights'], data['areas'], data['aspect_ratios'])
    ]
    df = pd.DataFrame(all_data)

    plt.figure(figsize=(20, 12))
    plt.style.use('seaborn-v0_8-whitegrid')

    # One distinct color per category, in stable sorted order.
    categories_list = sorted(categories.values())
    colors = plt.cm.tab10(np.linspace(0, 1, len(categories_list)))

    # Drop only extreme outliers: loose percentile bounds keep nearly all points.
    width_min, width_max = np.percentile(df['width'], [0.5, 99.9])
    height_min, height_max = np.percentile(df['height'], [0.5, 99.9])

    filtered_df = df[(df['width'] >= width_min) & (df['width'] <= width_max) &
                     (df['height'] >= height_min) & (df['height'] <= height_max)]

    for i, cat_name in enumerate(categories_list):
        cat_df = filtered_df[filtered_df['category'] == cat_name]
        plt.scatter(cat_df['width'], cat_df['height'],
                    s=15, alpha=0.7, label=cat_name, color=colors[i])

    # Log-log axes suit the wide dynamic range of box sizes.
    plt.xscale('log')
    plt.yscale('log')

    # Leave margin around the data; clamp lower bounds at 1 so the log axes
    # stay valid even for sub-pixel minima.
    x_min = max(filtered_df['width'].min() * 0.5, 1)
    y_min = max(filtered_df['height'].min() * 0.5, 1)
    plt.xlim(x_min, filtered_df['width'].max() * 1.1)
    plt.ylim(y_min, filtered_df['height'].max() * 1.1)

    plt.xlabel('Width (Log Scale)', fontsize=24)
    plt.ylabel('Height (Log Scale)', fontsize=24)
    plt.xticks(fontsize=24)
    plt.yticks(fontsize=24)

    # Keep the legend inside the axes, top-left.
    plt.legend(fontsize=24, loc='upper left', markerscale=3.0)

    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'paper_bbox_width_height_scatter.png'), dpi=300)
    plt.close()

    return df


def plot_bbox_area_distribution(bbox_data, categories, output_dir):
    """Plot a per-category boxplot of bounding-box areas (log y-scale).

    Saves 'paper_bbox_area_distribution.png' into output_dir.
    (Docstring fixed: the original was copy-pasted from the scatter plot.)
    """
    os.makedirs(output_dir, exist_ok=True)

    # Only the category name and box area feed this plot; the original also
    # built width/height/aspect columns that were never used.
    all_data = [
        {'category': categories[cat_id], 'area': area}
        for cat_id, data in bbox_data.items()
        for area in data['areas']
    ]
    df = pd.DataFrame(all_data)

    plt.figure(figsize=(16, 10))
    sns.boxplot(x='category', y='area', data=df)
    plt.yscale('log')  # areas span several orders of magnitude
    plt.title('Bounding Box Area Distribution by Category (Log Scale)', fontsize=24)
    plt.xlabel('Category', fontsize=24)
    plt.ylabel('Area (Log Scale)', fontsize=24)
    plt.xticks(rotation=45, ha='right')
    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'paper_bbox_area_distribution.png'), dpi=300)
    plt.close()
def analyze_spatial_distribution(coco_data, image_dir, output_dir):
    """Render per-category heatmaps of normalized annotation centers.

    Builds a 5x2 grid of density heatmaps ('hot' colormap, shared color
    scale) of bounding-box centers normalized to [0, 1] per image, plus a
    shared horizontal colorbar, and saves it as
    'spatial_distribution_heatmap.png' in output_dir.

    Returns the per-category dict of raw and normalized center coordinates.

    NOTE(review): image_dir is accepted but never used here — centers come
    from annotation bboxes and the sizes stored in coco_data['images'].
    """
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    from collections import defaultdict
    import scipy.ndimage

    os.makedirs(output_dir, exist_ok=True)

    # Map image id -> (width, height) for coordinate normalization.
    image_sizes = {img['id']: (img['width'], img['height']) for img in coco_data['images']}

    # Collect annotation center points per category.
    category_centers = defaultdict(lambda: {'x': [], 'y': [], 'norm_x': [], 'norm_y': []})

    for ann in coco_data['annotations']:
        cat_id = ann['category_id']
        bbox = ann['bbox']  # [x, y, width, height]
        image_id = ann['image_id']

        # Skip annotations whose image metadata is missing.
        if image_id in image_sizes:
            img_width, img_height = image_sizes[image_id]

            # Bounding-box center point in pixel coordinates.
            center_x = bbox[0] + bbox[2] / 2
            center_y = bbox[1] + bbox[3] / 2

            # Normalize into the 0-1 range.
            norm_x = center_x / img_width
            norm_y = center_y / img_height

            category_centers[cat_id]['x'].append(center_x)
            category_centers[cat_id]['y'].append(center_y)
            category_centers[cat_id]['norm_x'].append(norm_x)
            category_centers[cat_id]['norm_y'].append(norm_y)

    # Category id -> name lookup.
    categories = {cat['id']: cat['name'] for cat in coco_data['categories']}

    # White-background plotting style.
    plt.style.use('default')

    # Layout tuned for ten categories: 5 rows x 2 columns.
    n_rows, n_cols = 5, 2

    # Figure hosting the compact subplot grid.
    fig = plt.figure(figsize=(12, 20), dpi=300, facecolor='white')

    # GridSpec with one extra short bottom row (height ratio 0.1) reserved
    # for the shared colorbar; wspace/hspace keep the panels compact.
    gs = gridspec.GridSpec(n_rows + 1, n_cols, height_ratios=[1, 1, 1, 1, 1, 0.1], wspace=0.2,hspace=0.4)

    # Overall figure title.
    fig.suptitle('Spatial Distribution of Algae Species', fontsize=22, y=1, fontweight='bold')

    # 'hot' colormap for all heatmaps.
    custom_cmap = plt.cm.hot

    # Sort categories by id for a stable panel order.
    sorted_categories = sorted(category_centers.items(), key=lambda x: x[0])

    # First pass: find the maximum smoothed density so every heatmap can
    # share one color scale (same binning/smoothing as the drawing pass).
    all_densities = []
    for cat_id, centers in sorted_categories:
        if len(centers['norm_x']) > 5:
            heatmap, _, _ = np.histogram2d(
                centers['norm_x'], centers['norm_y'],
                bins=80, range=[[0, 1], [0, 1]]
            )
            heatmap = scipy.ndimage.gaussian_filter(heatmap, sigma=1.5)
            if heatmap.max() > 0:
                all_densities.append(heatmap.max())

    # Shared normalizer across all panels (fall back to [0, 1] if no data).
    if all_densities:
        vmax = max(all_densities)
        norm = plt.Normalize(0, vmax)
    else:
        norm = plt.Normalize(0, 1)

    # Second pass: draw each category's heatmap panel.
    for i, (cat_id, centers) in enumerate(sorted_categories):
        # Row/column position of this panel in the grid.
        row = i // n_cols
        col = i % n_cols

        ax = fig.add_subplot(gs[row, col])

        # Require more than 5 points for a meaningful density estimate.
        if len(centers['norm_x']) > 5:
            # 2D histogram of the normalized centers.
            heatmap, xedges, yedges = np.histogram2d(
                centers['norm_x'], centers['norm_y'],
                bins=80, range=[[0, 1], [0, 1]]
            )

            # Gaussian blur smooths the raw histogram.
            sigma = 1.5  # smoothing strength
            heatmap = scipy.ndimage.gaussian_filter(heatmap, sigma)

            # Transpose so x runs horizontally; the shared norm keeps
            # colors comparable across panels.
            im = ax.imshow(heatmap.T, cmap=custom_cmap, origin='lower',
                           extent=[0, 1, 0, 1], aspect='equal',
                           interpolation='bilinear', norm=norm)

            # Light grid for readability.
            ax.grid(color='gray', linestyle='--', linewidth=0.5, alpha=0.3)
        else:
            # Too few points: show a placeholder message instead.
            ax.text(0.5, 0.5, f"Insufficient data\nfor {categories[cat_id]}",
                    ha='center', va='center', fontsize=12)

        # Panel title = category name.
        ax.set_title(categories[cat_id], fontsize=16, pad=10, fontweight='bold')

        # Only label the y-axis on the left column to avoid repetition.
        if col == 0:
            ax.set_ylabel('Normalized Y Coordinate', fontsize=14)
        else:
            ax.set_ylabel('')

        # Only label the x-axis on the bottom row.
        if row == n_rows - 1:
            ax.set_xlabel('Normalized X Coordinate', fontsize=14)
        else:
            ax.set_xlabel('')

        # Fixed tick positions across all panels.
        ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0])
        ax.set_yticks([0, 0.25, 0.5, 0.75, 1.0])
        ax.tick_params(axis='both', labelsize=12)

        # Black border around each panel.
        for spine in ax.spines.values():
            spine.set_visible(True)
            spine.set_color('black')
            spine.set_linewidth(1)

    # Shared horizontal colorbar in the reserved bottom row.
    # NOTE(review): 'im' is the last drawn heatmap; if no category has more
    # than 5 points this raises NameError — confirm the dataset guarantees
    # sufficient points per category.
    cbar_ax = fig.add_subplot(gs[-1, :])
    cbar = plt.colorbar(im, cax=cbar_ax, orientation='horizontal')
    cbar.set_label('Density', fontsize=14, labelpad=10)

    # Manual spacing instead of tight_layout: GridSpec already controls gaps.
    plt.subplots_adjust(top=0.96, bottom=0.08, left=0.1, right=0.95)

    # Save a high-resolution image.
    plt.savefig(os.path.join(output_dir, 'spatial_distribution_heatmap.png'),
                dpi=600, bbox_inches='tight', facecolor='white')
    plt.close()

    return category_centers


def analyze_annotation_density(coco_data, output_dir):
    """Analyze per-image annotation density by category.

    Draws a violin plot of annotations-per-image for each category (log y
    scale) into 'paper_annotation_density_distribution.png' under
    output_dir, and returns a dict mapping category id to summary stats
    (min/max/mean/median/std and a Counter of count frequencies).
    """
    os.makedirs(output_dir, exist_ok=True)

    # image id -> {category id -> number of annotations in that image}
    image_category_counts = defaultdict(lambda: defaultdict(int))

    for ann in coco_data['annotations']:
        cat_id = ann['category_id']
        image_id = ann['image_id']
        image_category_counts[image_id][cat_id] += 1

    # Per-category list of "annotations in a single image" counts.
    categories = {cat['id']: cat['name'] for cat in coco_data['categories']}
    category_counts = defaultdict(list)

    for image_id, cat_counts in image_category_counts.items():
        for cat_id, count in cat_counts.items():
            category_counts[cat_id].append(count)

    # Plot the per-category density distribution.
    plt.figure(figsize=(20, 12))

    data = []
    labels = []

    for cat_id, counts in category_counts.items():
        if counts:  # keep only categories that actually have data
            data.append(counts)
            labels.append(categories[cat_id])

    # Violin plot shows the full shape of each distribution.
    parts = plt.violinplot(data, showmeans=True, showmedians=True)

    # Color each violin body from the tab10 palette.
    for i, pc in enumerate(parts['bodies']):
        pc.set_facecolor(plt.cm.tab10(i / len(data)))
        pc.set_alpha(0.7)

    # One tick label per category (violinplot positions start at 1).
    plt.xticks(np.arange(1, len(labels) + 1), labels, rotation=45, ha='right',fontsize=24)
    # plt.title('Distribution of Annotations per Image by Category', fontsize=24)
    plt.xticks(fontsize=24)
    plt.yticks(fontsize=24)
    plt.ylabel('Number of Annotations per Image', fontsize=24)
    plt.yscale('log')  # log scale shows the long-tailed distribution better
    plt.grid(True, alpha=0.3)
    plt.tight_layout()

    plt.savefig(os.path.join(output_dir, 'paper_annotation_density_distribution.png'), dpi=300)
    plt.close()

    # Compute and return summary statistics per category.
    density_stats = {}
    for cat_id, counts in category_counts.items():
        if counts:
            density_stats[cat_id] = {
                'name': categories[cat_id],
                'min': min(counts),
                'max': max(counts),
                'mean': np.mean(counts),
                'median': np.median(counts),
                'std': np.std(counts),
                'count_distribution': Counter(counts)
            }

    return density_stats


def analyze_anchor_clusters(bbox_data, categories, output_dir, n_clusters=5):
    """Estimate per-category anchor-box sizes via K-means on (width, height).

    For every category with at least n_clusters boxes, clusters the box
    dimensions, sorts the resulting anchors by area, and draws one subplot
    per category showing each anchor rectangle and its share of boxes.
    Saves 'anchor_clusters.png' into output_dir.

    Returns:
        dict mapping category id -> {'anchors': (k, 2) array of
        (width, height) pairs, 'counts': per-cluster sample counts,
        'total': number of boxes in the category}.
    """
    os.makedirs(output_dir, exist_ok=True)

    anchor_results = {}

    for cat_id, data in bbox_data.items():
        widths = np.array(data['widths'])
        heights = np.array(data['heights'])

        # Need at least n_clusters samples for K-means to be meaningful.
        if len(widths) >= n_clusters:
            X = np.column_stack((widths, heights))

            # Fixed random_state keeps the anchors reproducible run-to-run.
            kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
            kmeans.fit(X)

            # Cluster centers are the anchor (width, height) pairs.
            anchors = kmeans.cluster_centers_
            cluster_counts = np.bincount(kmeans.labels_)

            # Sort anchors (and their counts) by area, small to large.
            sorted_indices = np.argsort(anchors[:, 0] * anchors[:, 1])
            anchors = anchors[sorted_indices]
            cluster_counts = cluster_counts[sorted_indices]

            anchor_results[cat_id] = {
                'anchors': anchors,
                'counts': cluster_counts,
                'total': len(widths)
            }

    # Size the subplot grid to the data: 3 columns, as many rows as needed.
    # (The original hard-coded a 4x3 grid, which breaks for >12 categories.)
    n_cols = 3
    n_rows = max(1, -(-len(anchor_results) // n_cols))  # ceil division

    plt.figure(figsize=(16, 16))

    for i, (cat_id, result) in enumerate(anchor_results.items()):
        cat_name = categories[cat_id]
        anchors = result['anchors']
        counts = result['counts']
        total = result['total']

        ax = plt.subplot(n_rows, n_cols, i + 1)

        # Axis limits with a 20% margin around the largest anchor.
        ax.set_xlim(0, np.max(anchors[:, 0]) * 1.2)
        ax.set_ylim(0, np.max(anchors[:, 1]) * 1.2)

        # Draw each anchor as a rectangle at the origin, labelled with its
        # 1-based index and the share of boxes assigned to that cluster.
        for j, (width, height) in enumerate(anchors):
            percentage = counts[j] / total * 100
            rect = Rectangle((0, 0), width, height, linewidth=2,
                             edgecolor=plt.cm.tab10(j / len(anchors)),
                             facecolor='none')
            ax.add_patch(rect)
            ax.text(width / 2, height / 2, f"{j + 1}: {percentage:.1f}%",
                    ha='center', va='center', fontsize=10)

        ax.set_title(f"{cat_name}", fontsize=12)
        ax.set_xlabel('width', fontsize=10)  # fixed typo: was 'widdth'
        ax.set_ylabel('height', fontsize=10)
        ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'anchor_clusters.png'), dpi=300)
    plt.close()

    return anchor_results


# def analyze_class_imbalance(coco_data, output_dir):
#     """分析类别不平衡问题"""
#     os.makedirs(output_dir, exist_ok=True)
#
#     categories = {cat['id']: cat['name'] for cat in coco_data['categories']}
#
#     # 统计图像和标注数量
#     image_counts = defaultdict(int)
#     annotation_counts = defaultdict(int)
#
#     # 收集每个图像包含的类别
#     image_categories = defaultdict(set)
#
#     for ann in coco_data['annotations']:
#         cat_id = ann['category_id']
#         image_id = ann['image_id']
#
#         annotation_counts[cat_id] += 1
#         image_categories[image_id].add(cat_id)
#
#     # 统计每个类别出现在多少张图像中
#     for image_id, cats in image_categories.items():
#         for cat_id in cats:
#             image_counts[cat_id] += 1
#
#     # 计算每个类别的图像和标注比例
#     total_images = len(coco_data['images'])
#     total_annotations = len(coco_data['annotations'])
#
#     imbalance_stats = {}
#     for cat_id in categories:
#         imbalance_stats[cat_id] = {
#             'name': categories[cat_id],
#             'image_count': image_counts[cat_id],
#             'image_percentage': image_counts[cat_id] / total_images * 100,
#             'annotation_count': annotation_counts[cat_id],
#             'annotation_percentage': annotation_counts[cat_id] / total_annotations * 100,
#             'annotations_per_image': annotation_counts[cat_id] / image_counts[cat_id] if image_counts[cat_id] > 0 else 0
#         }
#
#     # 绘制类别不平衡图
#     plt.figure(figsize=(16, 10))
#
#     cat_names = [imbalance_stats[cat_id]['name'] for cat_id in sorted(imbalance_stats.keys())]
#     img_percentages = [imbalance_stats[cat_id]['image_percentage'] for cat_id in sorted(imbalance_stats.keys())]
#     ann_percentages = [imbalance_stats[cat_id]['annotation_percentage'] for cat_id in sorted(imbalance_stats.keys())]
#
#     x = np.arange(len(cat_names))
#     width = 0.35
#
#     plt.bar(x - width / 2, img_percentages, width, label='Image Percentage (%)')
#     plt.bar(x + width / 2, ann_percentages, width, label='Annotation Percentage (%)')
#
#     plt.title('Image and Annotation Percentages by Category', fontsize=16)
#     plt.xlabel('Category', fontsize=14)
#     plt.ylabel('Percentage (%)', fontsize=14)
#     plt.xticks(x, cat_names, rotation=45, ha='right')
#     plt.legend()
#     plt.grid(True, alpha=0.3)
#     plt.tight_layout()
#
#     plt.savefig(os.path.join(output_dir, 'class_imbalance.png'), dpi=300)
#     plt.close()
#
#     # 绘制每张图像的平均标注数
#     plt.figure(figsize=(16, 10))
#
#     annotations_per_image = [imbalance_stats[cat_id]['annotations_per_image'] for cat_id in
#                              sorted(imbalance_stats.keys())]
#
#     plt.bar(x, annotations_per_image)
#
#     plt.title('Average Annotations per Image by Category', fontsize=16)
#     plt.xlabel('Category', fontsize=14)
#     plt.ylabel('Average Annotations per Image', fontsize=14)
#     plt.xticks(x, cat_names, rotation=45, ha='right')
#     plt.grid(True, alpha=0.3)
#     plt.tight_layout()
#
#     plt.savefig(os.path.join(output_dir, 'annotations_per_image.png'), dpi=300)
#     plt.close()
#
#     return imbalance_stats

def analyze_class_imbalance(coco_data, output_dir):
    """Analyze class imbalance and save a publication-style donut chart.

    Computes per-category image/annotation counts and percentages, draws a
    two-panel donut chart (left: images per category, right: annotations
    per category) into 'paper_category_distribution_pie.png' under
    output_dir, and returns the per-category imbalance statistics.

    Fixes over the original: the suptitle had the left/right panel labels
    swapped; the color shuffle is now seeded so figures are reproducible;
    imbalance_stats is now returned (it was computed but dropped).
    """
    os.makedirs(output_dir, exist_ok=True)

    categories = {cat['id']: cat['name'] for cat in coco_data['categories']}

    # Count annotations per category and record which categories appear
    # in each image.
    image_counts = defaultdict(int)
    annotation_counts = defaultdict(int)
    image_categories = defaultdict(set)

    for ann in coco_data['annotations']:
        annotation_counts[ann['category_id']] += 1
        image_categories[ann['image_id']].add(ann['category_id'])

    # Number of distinct images each category appears in.
    for cats in image_categories.values():
        for cat_id in cats:
            image_counts[cat_id] += 1

    total_images = len(coco_data['images'])
    total_annotations = len(coco_data['annotations'])

    imbalance_stats = {}
    for cat_id in categories:
        img_pct = image_counts[cat_id] / total_images * 100
        ann_pct = annotation_counts[cat_id] / total_annotations * 100

        imbalance_stats[cat_id] = {
            'name': categories[cat_id],
            'image_count': image_counts[cat_id],
            'image_percentage': img_pct,
            'annotation_count': annotation_counts[cat_id],
            'annotation_percentage': ann_pct,
            'annotations_per_image': (annotation_counts[cat_id] / image_counts[cat_id]
                                      if image_counts[cat_id] > 0 else 0),
            # annotation share relative to image share (>1 = annotation-dense)
            'imbalance_ratio': ann_pct / img_pct if img_pct > 0 else 0,
        }

    # Plot categories in ascending order of image percentage.
    sorted_cat_ids = sorted(imbalance_stats.keys(),
                            key=lambda c: imbalance_stats[c]['image_percentage'])

    plt.style.use('seaborn-v0_8-whitegrid')

    # Uniform font sizes used throughout the figure.
    FONT_SIZES = {
        'title': 24,           # figure title
        'subtitle': 24,        # subplot titles (currently unused)
        'legend': 18,          # legend entries
        'legend_title': 22,    # legend title
        'pie_text': 16,        # percentage labels on slices
        'center_text': 16      # totals shown in the donut hole
    }

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 12), dpi=300, facecolor='white')
    # Left panel plots images, right panel plots annotations (the original
    # title had the two sides swapped).
    fig.suptitle('(left)Distribution of Images by Category\n(right)Distribution of Annotations by Category',
                 fontsize=FONT_SIZES['title'], fontweight='bold', y=0.98)

    # One color per category; shuffled with a fixed seed so that runs are
    # reproducible while adjacent slices still contrast.
    colors = plt.cm.Spectral(np.linspace(0, 1, len(sorted_cat_ids)))
    np.random.default_rng(42).shuffle(colors)

    threshold = 0.0  # categories below this percentage merge into "Others"

    def _draw_pie(ax, sizes, percentages, unit):
        """Draw one donut panel (slices + totals + legend) on ax.

        unit is the plural noun used in the labels ('images'/'annotations').
        """
        data, labels, slice_colors = [], [], []
        other_size = 0
        other_count = 0

        for i, (size, pct, cat_id) in enumerate(zip(sizes, percentages, sorted_cat_ids)):
            if pct >= threshold:
                name = imbalance_stats[cat_id]['name'].replace('_', ' ')
                data.append(size)
                labels.append(f"{name} ({size}, {pct:.1f}%)")
                slice_colors.append(colors[i])
            else:
                other_size += size
                other_count += 1

        # Merge any below-threshold categories into a single gray slice.
        if other_count > 0:
            data.append(other_size)
            labels.append(f"Others ({other_count} classes, {other_size} {unit})")
            slice_colors.append('gray')

        wedges, _texts, autotexts = ax.pie(
            data,
            autopct=lambda pct: f'{pct:.1f}%' if pct > 3 else '',  # label large slices only
            startangle=90,
            colors=slice_colors,
            wedgeprops={
                'edgecolor': 'white',
                'linewidth': 1.5,
                'antialiased': True,
                'width': 0.8  # ring width: < 1.0 makes a donut
            },
            shadow=True,
            explode=[0.05 if i < 3 else 0 for i in range(len(data))],  # pop out the first three slices
            pctdistance=0.85,
            radius=1.2
        )

        # Bold white percentage labels on the slices.
        for autotext in autotexts:
            autotext.set_fontsize(FONT_SIZES['pie_text'])
            autotext.set_fontweight('bold')
            autotext.set_color('white')

        # Punch a hole in the middle and print the total there.
        ax.add_patch(plt.Circle((0, 0), 0.3, fc='white'))
        ax.text(0, 0, f'Total\n{sum(data)}\n{unit.capitalize()}',
                ha='center', va='center', fontsize=FONT_SIZES['center_text'], fontweight='bold')

        ax.legend(
            wedges,
            labels,
            title="Categories",
            loc="center left",
            bbox_to_anchor=(1.05, 0.5),
            fontsize=FONT_SIZES['legend'],
            title_fontsize=FONT_SIZES['legend_title'],
            frameon=True,
            framealpha=0.9,
            edgecolor='gray'
        )

    img_sizes = [imbalance_stats[c]['image_count'] for c in sorted_cat_ids]
    img_percentages = [imbalance_stats[c]['image_percentage'] for c in sorted_cat_ids]
    ann_sizes = [imbalance_stats[c]['annotation_count'] for c in sorted_cat_ids]
    ann_percentages = [imbalance_stats[c]['annotation_percentage'] for c in sorted_cat_ids]

    _draw_pie(ax1, img_sizes, img_percentages, 'images')
    _draw_pie(ax2, ann_sizes, ann_percentages, 'annotations')

    # Light uniform background across figure and both axes.
    fig.patch.set_facecolor('#f8f9fa')
    for ax in (ax1, ax2):
        ax.set_facecolor('#f8f9fa')

    plt.tight_layout()
    plt.subplots_adjust(top=1.4, wspace=1.5)  # leave room for the suptitle
    plt.savefig(os.path.join(output_dir, 'paper_category_distribution_pie.png'), dpi=300, bbox_inches='tight')
    plt.close()

    return imbalance_stats




def analyze_image_quality(coco_data, image_dir, output_dir, sample_size=100):
    """Analyze image quality on a random sample of dataset images.

    Measures brightness, contrast, sharpness, resolution and aspect ratio
    for up to sample_size randomly chosen images, saves histograms to
    'image_quality_metrics.png' in output_dir, and returns summary stats
    (mean/std/min/max) per metric.

    NOTE(review): if every sampled image fails to load, the final np.min /
    np.max calls raise on empty lists — confirm the image paths are valid.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Randomly sample images (use all of them if fewer than sample_size).
    images = coco_data['images']
    if len(images) > sample_size:
        sample_images = random.sample(images, sample_size)
    else:
        sample_images = images

    # Quality metrics accumulated across the sample.
    quality_metrics = {
        'brightness': [],
        'contrast': [],
        'sharpness': [],
        'resolution': [],
        'aspect_ratios': []
    }

    for img in tqdm(sample_images, desc="分析图像质量"):
        image_path = os.path.join(image_dir, img['file_name'])

        try:
            # Load the image; cv2.imread returns None on failure.
            image = cv2.imread(image_path)
            if image is None:
                continue

            # Grayscale version for the intensity-based metrics.
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # 1. Brightness: mean pixel value.
            brightness = np.mean(gray)
            quality_metrics['brightness'].append(brightness)

            # 2. Contrast: standard deviation of pixel values.
            contrast = np.std(gray)
            quality_metrics['contrast'].append(contrast)

            # 3. Sharpness: variance of the Laplacian response.
            laplacian = cv2.Laplacian(gray, cv2.CV_64F).var()
            quality_metrics['sharpness'].append(laplacian)

            # 4. Resolution: total pixel count.
            resolution = image.shape[0] * image.shape[1]  # height * width
            quality_metrics['resolution'].append(resolution)

            # 5. Aspect ratio.
            aspect_ratio = image.shape[1] / image.shape[0]  # width / height
            quality_metrics['aspect_ratios'].append(aspect_ratio)

        except Exception as e:
            print(f"处理图像 {image_path} 时出错: {e}")

    # Plot the distribution of each metric (2x2 grid; aspect ratio omitted).
    plt.figure(figsize=(16, 12))

    # 1. Brightness distribution; dashed red line marks the mean.
    plt.subplot(2, 2, 1)
    plt.hist(quality_metrics['brightness'], bins=30, alpha=0.7)
    plt.axvline(np.mean(quality_metrics['brightness']), color='r', linestyle='dashed', linewidth=1)
    plt.title('Brightness Distribution', fontsize=14)
    plt.xlabel('Brightness (Average Pixel Value)', fontsize=12)
    plt.ylabel('Number of Images', fontsize=12)
    plt.grid(True, alpha=0.3)

    # 2. Contrast distribution.
    plt.subplot(2, 2, 2)
    plt.hist(quality_metrics['contrast'], bins=30, alpha=0.7)
    plt.axvline(np.mean(quality_metrics['contrast']), color='r', linestyle='dashed', linewidth=1)
    plt.title('Contrast Distribution', fontsize=14)
    plt.xlabel('Contrast (Pixel Standard Deviation)', fontsize=12)
    plt.ylabel('Number of Images', fontsize=12)
    plt.grid(True, alpha=0.3)

    # 3. Sharpness distribution.
    plt.subplot(2, 2, 3)
    plt.hist(quality_metrics['sharpness'], bins=30, alpha=0.7)
    plt.axvline(np.mean(quality_metrics['sharpness']), color='r', linestyle='dashed', linewidth=1)
    plt.title('Sharpness Distribution', fontsize=14)
    plt.xlabel('Sharpness (Laplacian Variance)', fontsize=12)
    plt.ylabel('Number of Images', fontsize=12)
    plt.grid(True, alpha=0.3)

    # 4. Resolution distribution (log x-axis for the wide range).
    plt.subplot(2, 2, 4)
    plt.hist(quality_metrics['resolution'], bins=30, alpha=0.7)
    plt.axvline(np.mean(quality_metrics['resolution']), color='r', linestyle='dashed', linewidth=1)
    plt.title('Resolution Distribution', fontsize=14)
    plt.xlabel('Resolution (Pixel Count)', fontsize=12)
    plt.ylabel('Number of Images', fontsize=12)
    plt.xscale('log')
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'image_quality_metrics.png'), dpi=300)
    plt.close()

    # Summary statistics per metric.
    quality_stats = {
        'brightness': {
            'mean': np.mean(quality_metrics['brightness']),
            'std': np.std(quality_metrics['brightness']),
            'min': np.min(quality_metrics['brightness']),
            'max': np.max(quality_metrics['brightness'])
        },
        'contrast': {
            'mean': np.mean(quality_metrics['contrast']),
            'std': np.std(quality_metrics['contrast']),
            'min': np.min(quality_metrics['contrast']),
            'max': np.max(quality_metrics['contrast'])
        },
        'sharpness': {
            'mean': np.mean(quality_metrics['sharpness']),
            'std': np.std(quality_metrics['sharpness']),
            'min': np.min(quality_metrics['sharpness']),
            'max': np.max(quality_metrics['sharpness'])
        },
        'resolution': {
            'mean': np.mean(quality_metrics['resolution']),
            'std': np.std(quality_metrics['resolution']),
            'min': np.min(quality_metrics['resolution']),
            'max': np.max(quality_metrics['resolution'])
        },
        'aspect_ratio': {
            'mean': np.mean(quality_metrics['aspect_ratios']),
            'std': np.std(quality_metrics['aspect_ratios']),
            'min': np.min(quality_metrics['aspect_ratios']),
            'max': np.max(quality_metrics['aspect_ratios'])
        }
    }

    return quality_stats


def create_comprehensive_dataset_analysis(coco_data, output_dir):
    """Build a single 2x2 overview figure for a COCO-format dataset.

    Panels:
        1. Annotation count per class (bar chart, with imbalance ratio).
        2. Bounding-box width vs. height scatter (with summary stats).
        3. Log10 bounding-box area histogram (with COCO small/medium/large split).
        4. Annotations-per-image histogram (with density stats).

    The figure is saved to ``<output_dir>/dataset_comprehensive_analysis.png``.

    Args:
        coco_data: Parsed COCO dict containing 'categories' and 'annotations'.
        output_dir: Directory for the output image; created if missing.

    Returns:
        dict with keys 'class_distribution', 'bbox_stats' and 'density_stats'.
        Empty inputs yield zero-valued statistics instead of raising.
    """
    # plt / np / Counter / defaultdict / os come from the module-level imports;
    # the previous function-local re-imports were redundant.
    os.makedirs(output_dir, exist_ok=True)

    # category id -> human-readable name
    categories = {cat['id']: cat['name'] for cat in coco_data['categories']}

    # 1. Class imbalance: annotation count per category, sorted descending.
    category_counts = Counter(ann['category_id'] for ann in coco_data['annotations'])
    sorted_cats = sorted(category_counts.items(), key=lambda x: x[1], reverse=True)
    # zip(*[]) raises ValueError, so handle the no-annotation case explicitly.
    cat_ids, counts = zip(*sorted_cats) if sorted_cats else ((), ())
    cat_names = [categories[cat_id] for cat_id in cat_ids]

    # 2. Bounding-box geometry (skip degenerate zero-sized boxes).
    widths, heights, areas, aspect_ratios = [], [], [], []
    for ann in coco_data['annotations']:
        w, h = ann['bbox'][2], ann['bbox'][3]  # bbox = [x, y, width, height]
        if w > 0 and h > 0:
            widths.append(w)
            heights.append(h)
            areas.append(w * h)
            aspect_ratios.append(w / h)

    # 3. Annotation density: number of boxes per image.
    image_annotation_counts = defaultdict(int)
    for ann in coco_data['annotations']:
        image_annotation_counts[ann['image_id']] += 1
    annotations_per_image = list(image_annotation_counts.values())

    def _mean(xs):
        # np.mean([]) warns and returns nan; report 0.0 for empty input instead.
        return float(np.mean(xs)) if xs else 0.0

    # 2x2 figure layout (the old comment said 3x2, but the code is 2x2).
    plt.figure(figsize=(18, 12), dpi=300)

    # Panel 1: class distribution bar chart.
    ax1 = plt.subplot(2, 2, 1)
    bars = ax1.bar(range(len(cat_names)), counts, color=plt.cm.tab10.colors[:len(cat_names)])
    ax1.set_xticks(range(len(cat_names)))
    ax1.set_xticklabels(cat_names, rotation=45, ha='right')
    ax1.set_title('Class Distribution', fontsize=14)
    ax1.set_ylabel('Number of Annotations', fontsize=12)
    ax1.grid(axis='y', alpha=0.3)

    # Per-bar count labels.
    for bar in bars:
        height = bar.get_height()
        ax1.text(bar.get_x() + bar.get_width() / 2., height + 0.1,
                 f'{height:,}', ha='center', va='bottom', fontsize=9)

    # Imbalance ratio = most populated class / least populated class.
    if counts:
        max_count = max(counts)
        min_count = min(counts)
        imbalance_ratio = max_count / min_count if min_count > 0 else float('inf')
        ax1.text(0.02, 0.95, f'Imbalance Ratio: {imbalance_ratio:.1f}:1',
                 transform=ax1.transAxes, fontsize=10,
                 bbox=dict(facecolor='white', alpha=0.8, boxstyle='round,pad=0.5'))

    # Panel 2: width vs height scatter.
    ax2 = plt.subplot(2, 2, 2)
    ax2.scatter(widths, heights, alpha=0.2, s=5, c='blue')
    ax2.set_xlabel('Width (pixels)', fontsize=12)
    ax2.set_ylabel('Height (pixels)', fontsize=12)
    ax2.set_title('Bounding Box Size Distribution', fontsize=14)
    ax2.grid(True, alpha=0.3)

    if widths:  # stats would render as nan without any valid boxes
        stats_text = (
            f"Count: {len(widths):,}\n"
            f"Mean W×H: {np.mean(widths):.1f}×{np.mean(heights):.1f}\n"
            f"Median W×H: {np.median(widths):.1f}×{np.median(heights):.1f}\n"
            f"Mean Area: {np.mean(areas):.1f}\n"
            f"Mean Aspect Ratio: {np.mean(aspect_ratios):.2f}"
        )
        ax2.text(0.02, 0.95, stats_text, transform=ax2.transAxes, fontsize=10,
                 bbox=dict(facecolor='white', alpha=0.8, boxstyle='round,pad=0.5'))

    # Panel 3: log10-area histogram.
    ax3 = plt.subplot(2, 2, 3)
    ax3.hist(np.log10(areas), bins=50, alpha=0.7, color='green')
    ax3.set_xlabel('Log10(Area) in pixels²', fontsize=12)
    ax3.set_ylabel('Frequency', fontsize=12)
    ax3.set_title('Bounding Box Area Distribution', fontsize=14)
    ax3.grid(True, alpha=0.3)

    # COCO evaluation convention: small/medium/large split at 32² and 96² px.
    area_thresholds = {
        'Small (<32²)': sum(1 for a in areas if a < 32 * 32),
        'Medium (32²-96²)': sum(1 for a in areas if 32 * 32 <= a < 96 * 96),
        'Large (>96²)': sum(1 for a in areas if a >= 96 * 96)
    }
    if areas:  # avoid ZeroDivisionError on an empty dataset
        area_stats = "\n".join([f"{k}: {v:,} ({v / len(areas) * 100:.1f}%)"
                                for k, v in area_thresholds.items()])
        ax3.text(0.02, 0.95, area_stats, transform=ax3.transAxes, fontsize=10,
                 bbox=dict(facecolor='white', alpha=0.8, boxstyle='round,pad=0.5'))

    # Panel 4: annotations-per-image histogram.
    ax4 = plt.subplot(2, 2, 4)
    ax4.hist(annotations_per_image, bins=30, alpha=0.7, color='purple')
    ax4.set_xlabel('Annotations per Image', fontsize=12)
    ax4.set_ylabel('Number of Images', fontsize=12)
    ax4.set_title('Annotation Density Distribution', fontsize=14)
    ax4.grid(True, alpha=0.3)

    if annotations_per_image:  # max()/min() raise on empty sequences
        density_stats = (
            f"Images: {len(annotations_per_image):,}\n"
            f"Total Annotations: {sum(annotations_per_image):,}\n"
            f"Mean: {np.mean(annotations_per_image):.1f}\n"
            f"Median: {np.median(annotations_per_image):.1f}\n"
            f"Max: {max(annotations_per_image)}\n"
            f"Min: {min(annotations_per_image)}"
        )
        ax4.text(0.02, 0.95, density_stats, transform=ax4.transAxes, fontsize=10,
                 bbox=dict(facecolor='white', alpha=0.8, boxstyle='round,pad=0.5'))

    plt.suptitle('Comprehensive Dataset Analysis', fontsize=16)
    plt.tight_layout(rect=[0, 0.03, 1, 0.97])

    # Save the combined figure.
    plt.savefig(os.path.join(output_dir, 'dataset_comprehensive_analysis.png'))
    plt.close()

    return {
        'class_distribution': dict(zip(cat_names, counts)),
        'bbox_stats': {
            'width_mean': _mean(widths),
            'height_mean': _mean(heights),
            'area_mean': _mean(areas),
            'aspect_ratio_mean': _mean(aspect_ratios)
        },
        'density_stats': {
            'mean': _mean(annotations_per_image),
            'median': float(np.median(annotations_per_image)) if annotations_per_image else 0.0,
            'max': max(annotations_per_image, default=0),
            'min': min(annotations_per_image, default=0)
        }
    }


def combine_analysis_figures(output_dir):
    """Stitch four previously saved analysis figures into one 2x2 overview.

    Expects the four per-analysis PNGs to already exist in ``output_dir``.
    Returns the path of the combined image, or None when any input figure
    is missing.
    """
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    from PIL import Image
    import numpy as np

    # Input figures in reading order: top-left, top-right, bottom-left, bottom-right.
    panel_files = [
        os.path.join(output_dir, "category_distribution_pie.png"),  # analyze_class_imbalance
        os.path.join(output_dir, "annotation_density_distribution.png"),  # analyze_annotation_density
        os.path.join(output_dir, "bbox_width_height_scatter.png"),  # width/height scatter
        os.path.join(output_dir, "bbox_area_distribution.png")  # area distribution
    ]

    # Bail out early if any source figure has not been generated yet.
    missing_files = [p for p in panel_files if not os.path.exists(p)]
    if missing_files:
        print(f"Warning: The following files are missing: {missing_files}")
        return

    # Large canvas to hold all four panels.
    fig = plt.figure(figsize=(20, 16), dpi=600)

    # GridSpec gives finer control over panel spacing than plain subplots.
    grid = gridspec.GridSpec(2, 2, width_ratios=[1, 1], height_ratios=[1, 1],
                             wspace=0.15, hspace=0.2)

    fig.suptitle('Dataset Analysis Overview', fontsize=24, fontweight='bold', y=0.98)

    panel_titles = [
        'Class Distribution Analysis',
        'Annotation Density Analysis',
        'Bounding Box Width vs Height Distribution',
        'Bounding Box Area Distribution'
    ]

    for idx, (path, caption) in enumerate(zip(panel_files, panel_titles)):
        row, col = divmod(idx, 2)
        axis = fig.add_subplot(grid[row, col])

        try:
            picture = Image.open(path)
            axis.imshow(np.array(picture))
            axis.set_title(caption, fontsize=18, pad=10)
            axis.axis('off')  # the panel is an image; hide axes entirely
        except Exception as e:
            # Render a placeholder panel instead of aborting the whole figure.
            print(f"Error loading image {path}: {e}")
            axis.text(0.5, 0.5, f"Error loading image", ha='center', va='center')
            axis.set_title(caption, fontsize=18)
            axis.axis('off')

    plt.tight_layout(rect=[0, 0, 1, 0.96])  # leave headroom for the suptitle

    combined_path = os.path.join(output_dir, "combined_dataset_analysis.png")
    plt.savefig(combined_path, dpi=300, bbox_inches='tight')
    plt.close()

    print(f"Combined image saved to: {combined_path}")
    return combined_path


def combine_figures_for_publication(image_paths, output_path, fig_titles=None, main_title=None):
    """
    Merge several images into a publication-style 2x2 panel figure with
    uniformly sized, square panels.

    Parameters:
    - image_paths: list of image file paths to merge
    - output_path: where to write the combined figure
    - fig_titles: per-panel titles; defaults to (a), (b), (c), (d)
    - main_title: optional title for the whole figure
    """
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    import matplotlib.image as mpimg
    import numpy as np
    import os

    # Serif, journal-style typography.
    plt.style.use('default')
    plt.rcParams.update({
        'font.family': 'serif',
        'font.size': 10,
        'axes.labelsize': 11,
        'axes.titlesize': 12,
        'xtick.labelsize': 10,
        'ytick.labelsize': 10,
        'legend.fontsize': 10,
        'figure.titlesize': 14
    })

    # Read every panel up front; substitute a white placeholder for any
    # missing file so the 2x2 layout stays intact.
    loaded = []
    for p in image_paths:
        if os.path.exists(p):
            loaded.append(mpimg.imread(p))
        else:
            print(f"Warning: Image {p} not found. Using placeholder.")
            loaded.append(np.ones((100, 100, 3)))

    fig = plt.figure(figsize=(12, 10), dpi=600)
    layout = gridspec.GridSpec(2, 2, figure=fig, wspace=0.25, hspace=0.4)

    # Default academic panel labels.
    if fig_titles is None:
        fig_titles = ['(a)', '(b)', '(c)', '(d)']

    for idx, (panel, label) in enumerate(zip(loaded, fig_titles)):
        axis = fig.add_subplot(layout[divmod(idx, 2)])
        axis.imshow(panel)

        # Strip all tick marks and tick labels.
        axis.set_xticks([])
        axis.set_yticks([])
        axis.set_xticklabels([])
        axis.set_yticklabels([])

        # Bold, left-aligned panel label, uniform across panels.
        axis.set_title(label, loc='left', fontweight='bold', pad=10)

        # Thin visible frame around each panel.
        for edge in axis.spines.values():
            edge.set_visible(True)
            edge.set_linewidth(0.5)

        # Force square panels regardless of the source image's aspect ratio.
        axis.set_aspect('equal', adjustable='box')

    if main_title:
        fig.suptitle(main_title, fontweight='bold', y=0.98)
        plt.subplots_adjust(top=0.92)  # make room for the suptitle

    # Tight bounding box keeps margins uniform in the exported file.
    plt.savefig(output_path, bbox_inches='tight', pad_inches=0.2, dpi=600)
    plt.close()

    return output_path


def main():
    """Entry point: load the COCO annotations, run the enabled dataset
    analyses, and assemble a publication-ready summary figure."""
    # Dataset location (alternative datasets kept commented for quick switching).
    # json_path = r"/media/ross/8TB/project/lsh/deep_learning/microAlgeaDataset/9899photos/2157061_1723389481/Annotations/coco_info.json"
    # image_dir = r"/media/ross/8TB/project/lsh/deep_learning/microAlgeaDataset/9899photos/2157061_1723389481/Images"
    # json_path = r"/media/ross/8TB/project/lsh/dataset/microAlgea/Algae6/train_valid/merged_annotations.json"
    # image_dir = r"/media/ross/8TB/project/lsh/dataset/microAlgea/Algae6/train_valid/images"
    json_path = r"/media/ross/8TB/project/lsh/deep_learning/microAlgeaDataset/2156944_1737533055/Annotations/coco_info.json"
    image_dir = r"/media/ross/8TB/project/lsh/deep_learning/microAlgeaDataset/2156944_1737533055/Images"
    # json_path = r"/media/ross/8TB/project/lsh/deep_learning/microAlgeaDataset/2156944_1737533055/Annotations/augmented_dataset_20250311_003330/augmented_annotations.json"
    # image_dir = r"/media/ross/8TB/project/lsh/deep_learning/microAlgeaDataset/2156944_1737533055/Annotations/augmented_dataset_20250311_003330/images"

    # Output directory for all generated figures.
    # output_dir = os.path.join(os.path.dirname(json_path), "advanced_analysis")
    output_dir = "/media/ross/8TB/project/lsh/deep_learning/DiffusionDet_mmdet/DiffusionDet/tools/mymodel_analysis_tools/figures_pre/dataset/advanced_analysis"
    os.makedirs(output_dir, exist_ok=True)

    # Load the COCO annotation file.
    print("加载COCO数据...")
    coco_data = load_coco_data(json_path)

    # Basic dataset statistics.
    categories = {cat['id']: cat['name'] for cat in coco_data['categories']}
    print(f"总图片数量: {len(coco_data['images'])}")
    print(f"总标注数量: {len(coco_data['annotations'])}")
    print(f"类别数量: {len(categories)}")

    # Enabled analysis steps.  (Disabled steps — bbox size distributions,
    # anchor clustering, image quality, comprehensive overview — can be
    # re-enabled by calling the corresponding functions here.)
    print("分析标注密度...")
    analyze_annotation_density(coco_data, output_dir)

    print("分析类别不平衡...")
    analyze_class_imbalance(coco_data, output_dir)

    print("分析标注空间分布...")
    analyze_spatial_distribution(coco_data, image_dir, output_dir)

    # Figures produced by the steps above, in panel order.
    panel_names = [
        "category_distribution_pie.png",
        "annotation_density_distribution.png",
        "bbox_width_height_scatter.png",
        "bbox_area_distribution.png",
    ]
    image_paths = [os.path.join(output_dir, name) for name in panel_names]

    fig_titles = [
        '(a) Class Distribution Analysis',
        '(b) Annotation Density Analysis',
        '(c) Bounding Box Width vs Height',
        '(d) Bounding Box Area Distribution'
    ]

    # Assemble the 2x2 publication figure.
    print("合并图像...")
    combine_figures_for_publication(
        image_paths,
        os.path.join(output_dir, "publication_ready_figure.png"),
        fig_titles=fig_titles,
        main_title="Figure 1: Dataset Analysis Overview"
    )

if __name__ == "__main__":
    # Seed both RNGs so any sampling-based analysis is reproducible.
    np.random.seed(42)
    random.seed(42)

    # NOTE: optional Chinese-font setup for matplotlib (SimHei / Noto Sans
    # CJK SC) is intentionally disabled; all figure text is in English.
    main()
