import os
import glob
from collections import Counter
import numpy as np

def parse_yolo_label(file_path):
    """Parse a YOLO-format label file and return its class ids.

    Each non-empty line is expected to start with an integer class id
    followed by bounding-box coordinates; only the class id is collected.

    Args:
        file_path: Path to a YOLO .txt label file.

    Returns:
        List of integer class ids (may be empty on error or empty file).
    """
    classes = []
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.split()
                if not parts:
                    # Blank line — nothing to parse.
                    continue
                try:
                    classes.append(int(parts[0]))
                except ValueError as e:
                    # FIX: previously a single malformed class id raised out of
                    # the whole loop, discarding every remaining line of the
                    # file. Report the bad line and keep parsing.
                    print(f"解析文件 {file_path} 时出错: {e}")
    except OSError as e:
        # Narrowed from a blanket `except Exception` to real I/O failures.
        print(f"解析文件 {file_path} 时出错: {e}")
    return classes

def analyze_dataset_distribution(base_path):
    """Collect per-class label statistics for each dataset split.

    Scans the 'train', 'val' and 'test' subdirectories of *base_path* for
    YOLO .txt label files and aggregates object counts and per-class
    percentages for every split that has valid labels.

    Args:
        base_path: Directory containing the train/val/test label folders.

    Returns:
        Dict keyed by split name; each value holds 'total_files',
        'total_objects', 'class_distribution' (class id -> count/percentage)
        and 'unique_classes' (sorted list of class ids).
    """
    results = {}

    for split in ('train', 'val', 'test'):
        split_dir = os.path.join(base_path, split)
        files = glob.glob(os.path.join(split_dir, '*.txt'))

        if not files:
            print(f"警告: 在 {split_dir} 中没有找到标签文件")
            continue

        print(f"\n正在分析 {split} 文件夹...")
        print(f"找到 {len(files)} 个标签文件")

        # Flatten every file's class ids into one list for this split.
        labels = []
        for path in files:
            labels.extend(parse_yolo_label(path))

        if not labels:
            print(f"警告: {split} 文件夹中没有找到有效的类别标签")
            continue

        counts = Counter(labels)
        n_objects = len(labels)

        # Count and share (percentage) per class id.
        distribution = {
            cid: {'count': cnt, 'percentage': (cnt / n_objects) * 100}
            for cid, cnt in counts.items()
        }

        results[split] = {
            'total_files': len(files),
            'total_objects': n_objects,
            'class_distribution': distribution,
            'unique_classes': sorted(counts),
        }

        # Report this split's statistics as they are computed.
        print(f"{split} 数据集统计:")
        print(f"总标签文件数: {len(files)}")
        print(f"总目标数量: {n_objects}")
        print(f"类别分布:")
        for cid in sorted(distribution):
            info = distribution[cid]
            print(f"  类别 {cid}: {info['count']} 个 ({info['percentage']:.2f}%)")

    return results

def compare_distributions(results):
    """Compare the class distributions of the train/val/test splits.

    Prints a per-class percentage table across splits, an overall similarity
    verdict based on the average of per-class max differences, and checks the
    split sizes against the expected 70-10-20 (train-val-test) ratio.

    Args:
        results: Mapping produced by analyze_dataset_distribution().
    """
    print("\n" + "="*50)
    print("分布相似性分析")
    print("="*50)

    # All three splits are needed for a meaningful comparison.
    if len(results) < 3:
        print("缺少部分数据集，无法进行完整比较")
        return

    folders = ['train', 'val', 'test']

    # Union of every class id seen in any split.
    all_classes = set()
    for folder in folders:
        if folder in results:
            all_classes.update(results[folder]['unique_classes'])

    if not all_classes:
        print("没有找到有效的类别数据")
        return

    print(f"所有数据集中出现的类别: {sorted(all_classes)}")

    print("\n各类别在不同数据集中的分布对比:")
    print("类别\tTrain\t\tVal\t\tTest\t\t最大差异")
    print("-" * 60)

    max_differences = {}

    for class_id in sorted(all_classes):
        # Percentage of this class in each split; 0 when absent.
        percentages = []
        for folder in folders:
            if folder in results and class_id in results[folder]['class_distribution']:
                percentages.append(results[folder]['class_distribution'][class_id]['percentage'])
            else:
                percentages.append(0.0)

        max_diff = max(percentages) - min(percentages)
        max_differences[class_id] = max_diff

        print(f"{class_id}\t{percentages[0]:.2f}%\t\t{percentages[1]:.2f}%\t\t{percentages[2]:.2f}%\t\t{max_diff:.2f}%")

    # Overall verdict: average of the per-class max spreads.
    print("\n分布相似性评估:")
    avg_max_diff = np.mean(list(max_differences.values()))
    print(f"平均最大差异: {avg_max_diff:.2f}%")

    if avg_max_diff < 5.0:
        print("✅ 各类别在三个数据集中的分布非常相似")
    elif avg_max_diff < 10.0:
        print("⚠️  各类别在三个数据集中的分布基本相似，略有差异")
    elif avg_max_diff < 15.0:
        print("⚠️  各类别在三个数据集中的分布存在一定差异")
    else:
        print("❌ 各类别在三个数据集中的分布差异较大")

    # Split-size ratio check.
    print("\n数据集大小比例:")
    total_objects = [results[f]['total_objects'] if f in results else 0 for f in folders]

    total_all = sum(total_objects)
    if total_all > 0:
        print(f"Train: {total_objects[0]} 个目标 ({total_objects[0]/total_all*100:.1f}%)")
        print(f"Val: {total_objects[1]} 个目标 ({total_objects[1]/total_all*100:.1f}%)")
        print(f"Test: {total_objects[2]} 个目标 ({total_objects[2]/total_all*100:.1f}%)")

        # Reference split: train 70%, val 10%, test 20%.
        expected_ratios = [0.7, 0.1, 0.2]
        actual_ratios = [x/total_all for x in total_objects]

        ratio_diff = sum(abs(a - e) for a, e in zip(actual_ratios, expected_ratios))
        if ratio_diff < 0.1:
            print("✅ 数据集划分比例合理")
        else:
            # BUG FIX: the message previously said "70-20-10" while
            # expected_ratios = [0.7, 0.1, 0.2] is train-val-test = 70-10-20.
            print("⚠️  数据集划分比例与常见比例(70-10-20)有较大差异")
def main():
    """Entry point: analyze and compare the YOLO label distributions."""
    base_path = "/home/ubuntu/文档/paper_dataset/mydataset/my_usv_aug/integrated_dataset_seaships_usv_weak_nousv5/labels"

    print("开始分析YOLO数据集类别分布...")
    print(f"基础路径: {base_path}")

    # Guard clause: nothing to do when the label root is missing.
    if not os.path.exists(base_path):
        print(f"错误: 路径 {base_path} 不存在")
        return

    # Gather per-split statistics, then evaluate how similar they are.
    compare_distributions(analyze_dataset_distribution(base_path))

# Standard script entry guard: run the analysis only when executed directly.
if __name__ == "__main__":
    main()