#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
严格的数据集处理 - 基于文件内容哈希的去重系统
解决相同文件内容但不同标签的数据泄露问题
"""

import os
import hashlib
import random
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple

def get_file_hash(file_path: str) -> Optional[str]:
    """Return the MD5 hex digest of a file's contents, or None if unreadable.

    Reads the file in fixed-size chunks so large image files are never
    loaded into memory in one piece.

    Args:
        file_path: Path of the file to hash.

    Returns:
        The 32-character hex digest, or None when the file cannot be read
        (the error is reported on stdout, matching the script's best-effort
        console style).
    """
    try:
        digest = hashlib.md5()
        with open(file_path, 'rb') as f:
            # 1 MiB chunks: bounded memory regardless of file size.
            for chunk in iter(lambda: f.read(1 << 20), b''):
                digest.update(chunk)
        return digest.hexdigest()
    except OSError as e:
        # Best-effort: report the failure and signal it with None so the
        # caller can skip this file.
        print(f"计算文件哈希失败 {file_path}: {e}")
        return None

def create_content_based_dataset(data_dir: str) -> Tuple[List[str], Dict]:
    """
    Build a strictly deduplicated dataset keyed on file-content MD5 hashes.

    Scans every image under ``data_dir``, groups files by content hash,
    detects groups whose copies carry conflicting UCEIS score labels
    (same bytes, different score folders), and keeps exactly one file per
    unique content. For conflicting groups the first file's label wins —
    an arbitrary choice, reported on stdout.

    Returns:
        (final_files, stats): ``final_files`` is the shuffled list of kept
        file paths; ``stats`` summarizes counts (total found, unique hashes,
        conflict/consistent group counts, removals) and includes the full
        per-group conflict details under ``'conflict_details'``.

    Raises:
        FileNotFoundError: if ``data_dir`` does not exist.
    """
    print("=" * 80)
    print("基于文件内容创建严格去重数据集")
    print("=" * 80)

    data_path = Path(data_dir)
    if not data_path.exists():
        raise FileNotFoundError(f"数据目录不存在: {data_dir}")

    # Collect every image file recursively, across the common extensions.
    all_image_files = []
    print("1. 扫描所有图像文件...")

    for ext in ['*.bmp', '*.jpg', '*.jpeg', '*.png']:
        all_image_files.extend(data_path.rglob(ext))

    print(f"找到图像文件总数: {len(all_image_files)}")

    # Group files by content hash: identical bytes land in the same bucket.
    print("\n2. 计算文件内容哈希并分组...")
    content_groups = defaultdict(list)
    hash_collisions = 0

    for file_path in all_image_files:
        file_hash = get_file_hash(str(file_path))
        if file_hash:
            content_groups[file_hash].append(file_path)
        else:
            # Unreadable file (hash returned None) — skip it entirely.
            print(f"跳过无法读取的文件: {file_path}")

    print(f"唯一内容哈希数: {len(content_groups)}")

    # Report how many hash buckets contain more than one file (duplicates).
    duplicate_files = {hash_val: files for hash_val, files in content_groups.items() if len(files) > 1}
    print(f"重复内容文件组数: {len(duplicate_files)}")

    # Split buckets into label-conflicting vs label-consistent groups.
    print("\n3. 分析标签冲突...")
    conflict_groups = {}
    consistent_groups = {}

    for hash_val, files in content_groups.items():
        # Collect the UCEIS score of every copy in this bucket.
        scores = []
        for file_path in files:
            score = extract_score_from_path(file_path)
            if score is not None:
                scores.append(score)

        unique_scores = list(set(scores))

        if len(unique_scores) > 1:
            # Label conflict: same bytes, different scores — data leakage risk.
            conflict_groups[hash_val] = {
                'files': files,
                'scores': scores,
                'unique_scores': unique_scores,
                'count': len(files)
            }
        else:
            # Labels agree (or no score could be parsed).
            consistent_groups[hash_val] = {
                'files': files,
                'score': scores[0] if scores else None,
                'count': len(files)
            }

    print(f"标签冲突组数: {len(conflict_groups)}")
    print(f"标签一致组数: {len(consistent_groups)}")

    # Report conflict statistics and a few concrete examples.
    if conflict_groups:
        print(f"\n[严重发现] {len(conflict_groups)} 组文件存在标签冲突！")
        print("这意味着相同内容的图像被标记为不同的UCEIS分数")

        # Tally conflicts by their sorted score combination (e.g. "2-3").
        conflict_stats = defaultdict(int)
        for hash_val, info in conflict_groups.items():
            score_key = "-".join(map(str, sorted(info['unique_scores'])))
            conflict_stats[score_key] += info['count']

        print("冲突类型统计:")
        for score_combo, count in sorted(conflict_stats.items()):
            print(f"  分数{score_combo}: {count}个文件")

        # Show up to five conflicting groups for manual inspection.
        print("\n冲突示例 (前5组):")
        for i, (hash_val, info) in enumerate(list(conflict_groups.items())[:5]):
            print(f"\n示例 {i+1}: 哈希 {hash_val[:16]}...")
            print(f"  冲突分数: {info['unique_scores']}")
            for file_path in info['files']:
                score = extract_score_from_path(file_path)
                print(f"    {score}分: {file_path}")

    # Build the final deduplicated file list.
    print("\n4. 创建最终去重数据集...")

    # For conflicting groups keep only the first file (arbitrary label choice);
    # for consistent groups keep one representative copy.
    final_files = []
    removed_conflicts = 0
    removed_duplicates = 0

    for hash_val, info in consistent_groups.items():
        # Keep one copy of each label-consistent group.
        final_files.append(str(info['files'][0]))
        if len(info['files']) > 1:
            removed_duplicates += len(info['files']) - 1

    for hash_val, info in conflict_groups.items():
        # Conflicting group: keep the first file, i.e. an arbitrary score.
        final_files.append(str(info['files'][0]))
        removed_conflicts += len(info['files']) - 1
        print(f"冲突解决: 保留 {extract_score_from_path(info['files'][0])}分版本，移除其他{len(info['files'])-1}个版本")

    print(f"\n最终数据集统计:")
    print(f"保留文件数: {len(final_files)}")
    print(f"移除冲突文件: {removed_conflicts} 个")
    print(f"移除重复文件: {removed_duplicates} 个")

    # Shuffle the kept files (NOTE: unseeded here — the later split step
    # re-seeds and re-shuffles, so the final split is still reproducible).
    random.shuffle(final_files)

    # Summary statistics returned alongside the file list.
    stats = {
        'total_files_found': len(all_image_files),
        'unique_content_hashes': len(content_groups),
        'conflict_groups': len(conflict_groups),
        'consistent_groups': len(consistent_groups),
        'final_files': len(final_files),
        'removed_conflicts': removed_conflicts,
        'removed_duplicates': removed_duplicates,
        'conflict_details': conflict_groups
    }

    return final_files, stats

def extract_score_from_path(file_path: Path) -> Optional[int]:
    """Extract the UCEIS score (1-8) encoded in a file path.

    The score is expected as a directory component named ``"<score>分"``
    (e.g. ``.../3分/img.bmp``). The deepest exactly-matching component wins;
    if no component matches exactly, fall back to a substring search over
    the whole path.

    Args:
        file_path: Path (or path string) possibly containing a score folder.
        Backslashes are normalized so Windows-style paths also parse.

    Returns:
        The score as an int in 1..8, or None when no score is found.
        (Original annotation claimed ``int`` but the function has always
        returned None on failure — callers already check for it.)
    """
    path_str = str(file_path).replace('\\', '/')
    path_parts = path_str.split('/')

    # Prefer the deepest path component that is exactly "<score>分".
    for part in reversed(path_parts):
        for score in range(1, 9):
            if part == f"{score}分":
                return score

    # Fallback: accept "<score>分" anywhere in the path string.
    for score in range(1, 9):
        if f"{score}分" in path_str:
            return score

    return None

def split_content_aware_dataset(final_files: List[str], train_ratio: float = 0.9, seed: int = 42) -> Tuple[List[str], List[str]]:
    """
    Split a content-deduplicated file list into train/validation sets.

    Shuffles a *copy* of the input with a fixed seed (the original code
    shuffled the caller's list in place — a hidden side effect; the copy
    produces the identical permutation since seed and contents are the
    same), then re-hashes every file to verify the two splits share no
    content.

    Args:
        final_files: Deduplicated file paths (left unmodified).
        train_ratio: Fraction of files assigned to the training set.
        seed: RNG seed for a reproducible shuffle.

    Returns:
        (train_files, val_files), or (None, None) if a content overlap is
        still detected — callers check truthiness of the results.
    """
    print(f"\n5. 分割数据集 (训练集比例: {train_ratio})...")

    # Shuffle a copy so the caller's list is not mutated as a side effect.
    shuffled = list(final_files)
    random.seed(seed)
    random.shuffle(shuffled)

    split_index = int(len(shuffled) * train_ratio)
    train_files = shuffled[:split_index]
    val_files = shuffled[split_index:]

    # Final sanity check: re-hash both splits and ensure zero overlap.
    print("\n6. 最终验证...")
    train_hashes = set()
    val_hashes = set()

    for file_path in train_files:
        file_hash = get_file_hash(file_path)
        if file_hash:
            train_hashes.add(file_hash)

    for file_path in val_files:
        file_hash = get_file_hash(file_path)
        if file_hash:
            val_hashes.add(file_hash)

    overlap = train_hashes & val_hashes
    print(f"训练集文件数: {len(train_files)}")
    print(f"验证集文件数: {len(val_files)}")
    print(f"内容重叠检查: {len(overlap)} 个 (应该是0)")

    if len(overlap) > 0:
        print("[错误] 仍然存在内容重叠！")
        return None, None
    else:
        print("[成功] 训练集和验证集完全没有内容重叠")

    return train_files, val_files

def analyze_final_dataset(train_files: List[str], val_files: List[str]) -> Dict:
    """Summarize the UCEIS score distribution of the final train/val split.

    Prints per-score counts and percentages for each split and returns the
    distributions plus totals as a dict.
    """
    print("\n7. 分析最终数据集...")

    def _tally_scores(paths):
        # Count files per UCEIS score; paths with no parsable score are skipped.
        counts = defaultdict(int)
        for p in paths:
            s = extract_score_from_path(Path(p))
            if s:
                counts[s] += 1
        return counts

    train_scores = _tally_scores(train_files)
    val_scores = _tally_scores(val_files)

    def _report(header, counts, total):
        # Print count and percentage for every score 1..8 under the header.
        print(header)
        for s in range(1, 9):
            n = counts.get(s, 0)
            pct = n / total * 100 if total else 0
            print(f"  {s}分: {n} 个 ({pct:.1f}%)")

    _report("训练集分数分布:", train_scores, len(train_files))
    _report("验证集分数分布:", val_scores, len(val_files))

    return {
        'train_distribution': dict(train_scores),
        'val_distribution': dict(val_scores),
        'train_total': len(train_files),
        'val_total': len(val_files)
    }

if __name__ == "__main__":
    # Smoke-test the strict dataset pipeline end to end on the local
    # endoscopy database (Windows path; adjust for other machines).
    data_dir = r"D:\肠内镜数据库\UCEIS1-8"

    try:
        # Dedup by content hash, then split with the leak check enabled.
        final_files, stats = create_content_based_dataset(data_dir)
        train_files, val_files = split_content_aware_dataset(final_files)

        # split returns (None, None) when an overlap slips through.
        if train_files and val_files:
            final_stats = analyze_final_dataset(train_files, val_files)

            print("\n" + "=" * 80)
            print("严格数据集创建完成")
            print("=" * 80)
            print(f"原始文件数: {stats['total_files_found']}")
            print(f"最终文件数: {stats['final_files']}")
            print(f"移除冲突: {stats['removed_conflicts']} 个")
            print(f"移除重复: {stats['removed_duplicates']} 个")
            print(f"训练集: {len(train_files)} 个")
            print(f"验证集: {len(val_files)} 个")

    except Exception as e:
        # Top-level boundary: report and dump the traceback for debugging.
        print(f"错误: {e}")
        import traceback
        traceback.print_exc()