#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
修正后的UCEIS数据集加载器
确保严格的数据分割，避免数据泄露
"""

import os
import torch
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
from pathlib import Path
from typing import Dict, Any, List, Tuple, Optional
import glob
import random
import numpy as np

# BERT Tokenizer
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')


class CorrectedUCEISDataset(Dataset):
    """
    Corrected UCEIS scoring dataset.

    Samples are grouped per score class, shuffled with a fixed seed, and
    split 70% / 15% / 15% into train / val / test. Because every instance
    re-seeds the RNG and scans files in sorted order, sibling instances
    built with the same seed produce disjoint, reproducible splits
    (no image can leak between train and val/test).
    """

    # Accepted image file extensions (compared case-insensitively).
    _IMAGE_EXTS = {'.bmp', '.jpg', '.jpeg', '.png'}

    def __init__(self, data_dir: str, transform: Optional['transforms.Compose'] = None,
                 split: str = 'train', random_seed: int = 42):
        """
        Initialize the dataset.

        Args:
            data_dir: root directory of the data
            transform: image transform pipeline (torchvision Compose)
            split: one of 'train', 'val', 'test'
            random_seed: seed for the per-class shuffle; using the same seed
                for all three split instances is what guarantees they are
                complementary and non-overlapping
        """
        self.data_dir = Path(data_dir)
        self.transform = transform
        self.split = split
        self.random_seed = random_seed
        self.samples: List[Dict[str, Any]] = []

        # Re-seed both RNGs so every instance shuffles each class list
        # identically; this keeps the train/val/test slices disjoint.
        random.seed(random_seed)
        np.random.seed(random_seed)

        # Scan the directory tree and keep only this split's slice.
        self._scan_and_split_data()

        print(f"{split.upper()}数据集初始化完成: {len(self.samples)} 个样本")
        print(f"{split.upper()}集类别分布: {self._get_class_distribution()}")

    def _scan_and_split_data(self):
        """Scan the data directory and perform a stratified 70/15/15 split."""

        # Map score folder names to numeric UCEIS scores.
        folder_to_score = {
            '1分': 1, '2分': 2, '3分': 3,
            '4分': 4, '5分': 5, '6分': 6, '7分': 7, '8分': 8
        }

        all_samples: List[Dict[str, Any]] = []

        # Layout A: root contains the group folders "1分-3分" and "4分-8分".
        # Iterate in sorted order so the sample list (and therefore the
        # seeded shuffle) is reproducible across filesystems.
        for subdir in sorted(self.data_dir.iterdir()):
            if subdir.is_dir() and subdir.name in ['1分-3分', '4分-8分']:
                for score_folder in sorted(subdir.iterdir()):
                    if score_folder.is_dir() and score_folder.name in folder_to_score:
                        score = folder_to_score[score_folder.name]
                        all_samples.extend(
                            self._collect_images_from_folder(score_folder, score))

        # Layout B (fallback): score folders directly under the root.
        if not all_samples:
            for folder_name, score in folder_to_score.items():
                folder_path = self.data_dir / folder_name
                if folder_path.is_dir():
                    all_samples.extend(
                        self._collect_images_from_folder(folder_path, score))

        # Group by class so the split is stratified per score.
        samples_by_class: Dict[int, List[Dict[str, Any]]] = {}
        for sample in all_samples:
            samples_by_class.setdefault(sample['score'], []).append(sample)

        split_samples: List[Dict[str, Any]] = []

        for score, samples in samples_by_class.items():
            # Shuffle this class once; identical across all three split
            # instances because the RNG was re-seeded in __init__.
            random.shuffle(samples)

            n_samples = len(samples)
            n_train = int(n_samples * 0.7)   # 70% train
            n_val = int(n_samples * 0.15)    # 15% val
            n_test = n_samples - n_train - n_val  # remainder (~15%) test

            if self.split == 'train':
                split_samples.extend(samples[:n_train])
            elif self.split == 'val':
                split_samples.extend(samples[n_train:n_train + n_val])
            elif self.split == 'test':
                split_samples.extend(samples[n_train + n_val:])

            print(f"类别 {score}分: 总数={n_samples}, 训练={n_train}, 验证={n_val}, 测试={n_test}")

        self.samples = split_samples

    def _collect_images_from_folder(self, folder_path: Path, score: int) -> List[Dict]:
        """Collect image samples from *folder_path* and all its subfolders.

        Bug fix: the previous implementation extended the file list with
        both glob() and rglob() results, so every top-level image appeared
        twice; after shuffling, the two copies of one image could land in
        different splits (train/val leakage). A single rglob('*') pass
        filtered by suffix covers the folder and its subfolders exactly
        once, matches extensions case-insensitively, and is sorted so the
        seeded shuffle is deterministic.
        """
        image_files = sorted(
            p for p in folder_path.rglob('*')
            if p.is_file() and p.suffix.lower() in self._IMAGE_EXTS
        )

        samples = []
        for img_path in image_files:
            # Convert 1-8 scores to 0-7 classification labels.
            label = score - 1

            # Per-image clinical text (unique via the file stem).
            unique_id = img_path.stem
            clinical_text = f"Patient with ulcerative colitis, UCEIS score {score}, image ID {unique_id}"

            samples.append({
                'image_path': str(img_path),
                'score': score,
                'label': label,  # 0-7 for classification
                'clinical_text': clinical_text,
                'unique_id': unique_id  # used to guarantee sample uniqueness
            })

        return samples

    def _get_class_distribution(self) -> Dict[int, int]:
        """Return {score: count} statistics for the current split."""
        distribution: Dict[int, int] = {}
        for sample in self.samples:
            score = sample['score']
            distribution[score] = distribution.get(score, 0) + 1
        return distribution

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, str, torch.Tensor]:
        """
        Fetch a single sample.

        Returns:
            image_tensor: transformed image tensor (or PIL image if no transform)
            text: clinical text description
            label: score label tensor (0-7, dtype long)
        """
        sample = self.samples[idx]

        # Load the image; convert to RGB so grayscale/palette files work too.
        image = Image.open(sample['image_path']).convert('RGB')

        if self.transform:
            image = self.transform(image)

        text = sample['clinical_text']
        label = torch.tensor(sample['label'], dtype=torch.long)

        return image, text, label


# Corrected image preprocessing transforms.
# NOTE: Compose order is behavior-critical — augmentations run on PIL
# images before ToTensor/Normalize.
train_transform = transforms.Compose([
    # Data augmentation (training set only)
    transforms.RandomRotation(degrees=[-10, 10]),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomResizedCrop((224, 224), scale=(0.9, 1.0)),
    transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.05),

    # Convert to tensor and normalize with ImageNet statistics
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Deterministic eval-time pipeline: resize only, no augmentation.
val_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

test_transform = val_transform  # test set reuses the same deterministic transform


def corrected_custom_collate_fn(batch: List[Tuple], device: torch.device) -> Dict[str, Any]:
    """
    Collate a list of (image, text, label) samples into model-ready tensors.

    Args:
        batch: list of (image, text, label) tuples from the dataset
        device: target device for every tensor in the result

    Returns:
        Dict with 'images' (stacked image batch), 'text_inputs' (BERT
        tokenizer output, each tensor moved to *device*) and 'labels'.
    """
    images, texts, labels = zip(*batch)

    # Stack per-sample tensors into batch tensors on the target device.
    image_batch = torch.stack(images).to(device)
    label_batch = torch.stack(labels).to(device)

    # Tokenize the clinical texts; max_length capped at 256 for efficiency.
    encoded = tokenizer(
        list(texts),
        padding='longest',
        truncation=True,
        max_length=256,
        return_tensors='pt'
    )
    encoded = {key: tensor.to(device) for key, tensor in encoded.items()}

    return {
        'images': image_batch,
        'text_inputs': encoded,
        'labels': label_batch
    }


def create_stratified_datasets(data_dir: str, random_seed: int = 42):
    """
    Build strictly separated train / validation / test datasets.

    Args:
        data_dir: data root directory
        random_seed: seed shared by all three splits, making them
            disjoint and reproducible

    Returns:
        (train_dataset, val_dataset, test_dataset)
    """
    # Each split pairs with its own transform pipeline.
    split_transforms = {
        'train': train_transform,
        'val': val_transform,
        'test': test_transform,
    }

    return tuple(
        CorrectedUCEISDataset(
            data_dir=data_dir,
            transform=split_transforms[name],
            split=name,
            random_seed=random_seed,
        )
        for name in ('train', 'val', 'test')
    )