#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
深度数据泄露诊断 - 找出真正的泄露原因
"""

import os
import sys
import random
import hashlib
from pathlib import Path
from typing import List, Dict, Optional, Set, Tuple
import torch
from torch.utils.data import DataLoader
import numpy as np

# 添加项目路径
sys.path.append(os.path.join(os.path.dirname(__file__), 'uc_model', 'utils'))

from dataset import create_file_list_from_directory, split_dataset_by_files, UCEISDataset, train_transform, val_transform, custom_collate_fn

def deep_leakage_diagnosis():
    """Diagnose the root cause of train/validation data leakage.

    Walks the UCEIS dataset directory, collects per-file metadata
    (name, size, score parsed from the path, MD5 of the content),
    de-duplicates files that share a name but carry different scores,
    reproduces the seeded 90/10 split used by training, and then checks
    for train/val overlap at four levels: file path, file name, file
    content hash, and the actual image tensors served by the DataLoader.
    All findings are printed to stdout; nothing is returned.

    NOTE(review): assumes the helpers imported at module level
    (create_file_list_from_directory, UCEISDataset, custom_collate_fn,
    the transforms) are importable and the hard-coded Windows data
    directory exists — confirm before running elsewhere.
    """

    print("=" * 80)
    print("深度数据泄露诊断")
    print("=" * 80)

    data_dir = r"D:\肠内镜数据库\UCEIS1-8"

    # Step 1: enumerate every file in the dataset directory.
    print("\n步骤1: 获取并分析所有文件...")
    all_files = create_file_list_from_directory(data_dir)
    print(f"总文件数: {len(all_files)}")

    # Step 2: collect per-file metadata.
    print("\n步骤2: 提取文件详细信息...")
    file_details = []
    for file_path in all_files:
        try:
            path_obj = Path(file_path)
            file_size = path_obj.stat().st_size
            file_name = path_obj.name

            # Score encoded in the path as an "N分" component.
            score = extract_score_from_path(path_obj)

            # Content hash, or None when the file cannot be read.
            # (Reuses get_file_hash instead of duplicating the MD5 logic
            # with its own bare except, as the original did.)
            file_hash = get_file_hash(file_path)

            file_details.append({
                'path': file_path,
                'name': file_name,
                'size': file_size,
                'score': score,
                'hash': file_hash
            })
        except Exception as e:
            print(f"处理文件错误 {file_path}: {e}")

    print(f"成功处理文件数: {len(file_details)}")

    # Step 3: group files sharing the same base name.
    print("\n步骤3: 按文件名分组分析...")
    name_groups = {}
    for detail in file_details:
        name_groups.setdefault(detail['name'], []).append(detail)

    duplicate_names = {name: files for name, files in name_groups.items() if len(files) > 1}
    print(f"重复文件名组数: {len(duplicate_names)}")

    # Step 4: classify each duplicate-name group.
    print("\n步骤4: 分析重复文件详细情况...")
    same_content_diff_score = 0
    diff_content_same_name = 0

    for name, files in duplicate_names.items():
        scores = [f['score'] for f in files if f['score'] is not None]
        unique_scores = list(set(scores))
        hashes = [f['hash'] for f in files if f['hash']]
        unique_hashes = list(set(hashes)) if hashes else []

        print(f"\n文件名: {name}")
        print(f"  出现次数: {len(files)}")
        print(f"  分数分布: {unique_scores}")
        print(f"  内容哈希: {unique_hashes[:3]}...")  # show at most 3 hashes

        # Identical content labelled with different scores is the worst
        # case: the very same image can land in train AND val with
        # conflicting labels.
        if len(unique_hashes) == 1 and len(unique_scores) > 1:
            same_content_diff_score += 1
            print(f"  [严重] 相同内容但不同分数！")
            for f in files:
                print(f"    {f['score']}分 -> {f['path']}")

        # Same name but genuinely different content is only a warning.
        elif len(unique_hashes) > 1:
            diff_content_same_name += 1
            print(f"  [警告] 相同文件名但内容不同")

    print(f"\n相同内容不同分数的文件: {same_content_diff_score} 组")
    print(f"相同文件名不同内容的文件: {diff_content_same_name} 组")

    # Step 5: reproduce the training pipeline's de-duplication logic.
    print("\n步骤5: 模拟数据分割逻辑...")

    unique_file_entries = {}
    # name -> score of the first kept version. Makes the
    # "same name, different score" check O(1) per file instead of
    # scanning every kept entry (the original loop was O(n^2) overall);
    # the kept/removed decisions are identical.
    kept_score_by_name = {}
    removed_duplicates = 0

    for detail in file_details:
        if detail['score'] is None:
            continue

        kept_score = kept_score_by_name.get(detail['name'])
        if kept_score is not None and kept_score != detail['score']:
            removed_duplicates += 1
            print(f"移除重复: {detail['name']} ({detail['score']}分) - 已存在{kept_score}分版本")
            continue

        name_score_key = f"{detail['name']}_{detail['score']}"
        if name_score_key not in unique_file_entries:
            unique_file_entries[name_score_key] = detail
            kept_score_by_name[detail['name']] = detail['score']

    unique_file_list = [entry['path'] for entry in unique_file_entries.values()]
    print(f"去重后文件数: {len(unique_file_list)}")
    print(f"移除重复文件数: {removed_duplicates}")

    # Step 6: reproduce the seeded 90/10 train/val split.
    print("\n步骤6: 执行数据分割并深度分析...")
    random.seed(42)
    random.shuffle(unique_file_list)

    split_index = int(len(unique_file_list) * 0.9)
    train_files = unique_file_list[:split_index]
    val_files = unique_file_list[split_index:]

    print(f"训练集文件数: {len(train_files)}")
    print(f"验证集文件数: {len(val_files)}")

    # Step 7: overlap by exact file path.
    print("\n步骤7: 检查文件级别重叠...")
    overlap = set(train_files) & set(val_files)
    print(f"文件路径重叠: {len(overlap)} 个")

    # Step 8: overlap by base file name.
    print("\n步骤8: 检查文件名重叠...")
    train_names = {Path(f).name for f in train_files}
    val_names = {Path(f).name for f in val_files}
    name_overlap = train_names & val_names
    print(f"文件名重叠: {len(name_overlap)} 个")

    # Step 9: overlap by file content (MD5).
    print("\n步骤9: 检查文件内容重叠...")
    train_hashes = {}
    val_hashes = {}

    for file_path in train_files:
        file_hash = get_file_hash(file_path)
        if file_hash:
            train_hashes[file_hash] = file_path

    for file_path in val_files:
        file_hash = get_file_hash(file_path)
        if file_hash:
            val_hashes[file_hash] = file_path

    hash_overlap = set(train_hashes.keys()) & set(val_hashes.keys())
    print(f"文件内容重叠: {len(hash_overlap)} 个")

    if hash_overlap:
        print("\n[严重] 发现内容重叠的文件！")
        for hash_val in list(hash_overlap)[:5]:
            print(f"  训练集: {train_hashes[hash_val]}")
            print(f"  验证集: {val_hashes[hash_val]}")

    # Step 10: build the actual Dataset objects and compare their samples.
    print("\n步骤10: 检查Dataset实际行为...")

    try:
        train_dataset = UCEISDataset(train_files, train_transform, 'train')
        val_dataset = UCEISDataset(val_files, val_transform, 'val')

        print(f"训练数据集样本数: {len(train_dataset)}")
        print(f"验证数据集样本数: {len(val_dataset)}")

        # Overlap of the sample paths the Datasets actually serve.
        train_sample_paths = {s['image_path'] for s in train_dataset.samples}
        val_sample_paths = {s['image_path'] for s in val_dataset.samples}

        actual_overlap = train_sample_paths & val_sample_paths
        print(f"Dataset样本路径重叠: {len(actual_overlap)} 个")

        if actual_overlap:
            print("[严重] Dataset级别存在重叠！")
            for path in list(actual_overlap)[:3]:
                print(f"  重叠路径: {path}")

        # Overlap of sample base names.
        train_sample_names = {Path(s['image_path']).name for s in train_dataset.samples}
        val_sample_names = {Path(s['image_path']).name for s in val_dataset.samples}

        sample_name_overlap = train_sample_names & val_sample_names
        print(f"Dataset样本文件名重叠: {len(sample_name_overlap)} 个")

        # Step 11: load one batch from each loader and compare raw pixels.
        print("\n步骤11: 模拟训练过程找问题...")

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        train_loader = DataLoader(
            train_dataset,
            batch_size=2,
            shuffle=False,  # keep order deterministic for tracing
            collate_fn=lambda batch: custom_collate_fn(batch, device),
            num_workers=0
        )

        val_loader = DataLoader(
            val_dataset,
            batch_size=2,
            shuffle=False,
            collate_fn=lambda batch: custom_collate_fn(batch, device),
            num_workers=0
        )

        # First training batch.
        train_batch = next(iter(train_loader))
        print(f"训练批次大小: {train_batch['images'].shape[0]}")
        print(f"训练批次标签: {train_batch['labels'].tolist()}")

        # First validation batch.
        val_batch = next(iter(val_loader))
        print(f"验证批次大小: {val_batch['images'].shape[0]}")
        print(f"验证批次标签: {val_batch['labels'].tolist()}")

        # BUGFIX: torch.Tensor has no .data.tobytes() — the original code
        # raised AttributeError here and fell into the except below. Hash
        # the raw bytes via a detached CPU numpy copy instead (the batch
        # may live on the GPU).
        def _tensor_hash(t):
            # One image tensor -> Python hash of its underlying bytes.
            return hash(t.detach().cpu().numpy().tobytes())

        train_images_hash = [_tensor_hash(train_batch['images'][i])
                             for i in range(train_batch['images'].shape[0])]
        val_images_hash = [_tensor_hash(val_batch['images'][i])
                           for i in range(val_batch['images'].shape[0])]

        image_hash_overlap = set(train_images_hash) & set(val_images_hash)
        print(f"批次内图像哈希重叠: {len(image_hash_overlap)} 个")

        if image_hash_overlap:
            print("[严重] 在训练和验证批次中发现相同图像！")

    except Exception as e:
        print(f"Dataset创建错误: {e}")
        import traceback
        traceback.print_exc()

    # Step 12: summarize the most likely root cause.
    print("\n" + "=" * 80)
    print("诊断结论")
    print("=" * 80)

    if len(hash_overlap) > 0:
        print(f"[根本原因] 发现 {len(hash_overlap)} 个文件内容重叠")
        print("  这是最可能的数据泄露原因：相同的图像文件出现在训练集和验证集中")
    elif len(name_overlap) > 0:
        print(f"[可能原因] 发现 {len(name_overlap)} 个文件名重叠")
        print("  可能是相同的图像但文件大小略有不同")
    else:
        print("未发现明显的文件级别数据泄露")
        print("可能的原因：")
        print("  1. DataLoader的数据增强导致的问题")
        print("  2. 模型过拟合")
        print("  3. 评估逻辑错误")
        print("  4. 数据集太小导致的偶然高准确率")

def extract_score_from_path(img_path: Path):
    """Extract the UCEIS score (1-8) encoded in a file path.

    First looks for a path component that is exactly "<score>分",
    checking the deepest component first; if no component matches,
    falls back to searching for "<score>分" anywhere in the path string
    (lowest score wins). Returns the score as an int, or None when no
    score marker is present.
    """
    normalized = str(img_path).replace('\\', '/')
    components = normalized.split('/')

    # Exact component match, deepest directory/file name first.
    for component in reversed(components):
        for candidate in range(1, 9):
            if component == f"{candidate}分":
                return candidate

    # Fallback: accept the marker anywhere in the path string.
    for candidate in range(1, 9):
        if f"{candidate}分" in normalized:
            return candidate

    return None

def get_file_hash(file_path: str) -> Optional[str]:
    """Return the first 16 hex chars of the file content's MD5, or None on I/O error.

    Reads in 1 MiB chunks so large image files are never loaded into
    memory at once. Only OS-level errors (missing file, permissions, ...)
    are swallowed — the original bare `except:` also ate
    KeyboardInterrupt/SystemExit, which made the script hard to abort.
    """
    md5 = hashlib.md5()
    try:
        with open(file_path, 'rb') as f:
            # iter(callable, sentinel) yields chunks until read() returns b''.
            for chunk in iter(lambda: f.read(1024 * 1024), b''):
                md5.update(chunk)
    except OSError:
        return None
    return md5.hexdigest()[:16]

# Entry point: run the full diagnosis when executed as a script.
if __name__ == "__main__":
    deep_leakage_diagnosis()