"""
数据集定义 - 弯液面体积预测数据集
"""
import os
import random
import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image

from .utils import extract_meniscus_features
from .transforms import get_train_transforms, get_val_transforms


def filter_negative_volumes(data):
    """Drop samples whose volume label is negative.

    Args:
        data: list of sample dicts, each carrying a 'meniscus_volume' key.

    Returns:
        tuple (filtered_data, removed_count): samples with
        meniscus_volume >= 0, and how many samples were dropped.
    """
    kept = []
    for sample in data:
        if sample['meniscus_volume'] >= 0:
            kept.append(sample)
    n_removed = len(data) - len(kept)

    if n_removed > 0:
        print(f"  Warning: Removed {n_removed} samples with negative volume")
        print(f"  (Original: {len(data)}, Kept: {len(kept)})")

    return kept, n_removed


class MeniscusDataset(Dataset):
    """Dataset for meniscus volume prediction.

    Each sample consists of:
        - 20 meniscus images
        - 20 matching pixel scales
        - 6 traditional features (extracted automatically from the images)
        - 1 volume label

    Args:
        data: list of sample dicts, each containing:
            - image_paths: list of 20 image paths
            - pixel_scales: list of 20 pixel scales
            - meniscus_volume: meniscus volume label
        image_size: target image size as (width, height)
        augment: whether to apply data augmentation
        filter_negative: whether to drop negative-volume samples
        transform: optional custom transform (overrides the augment choice)
    """
    def __init__(self, data, image_size=(288, 96), augment=False, 
                 filter_negative=True, transform=None):
        removed = 0
        if filter_negative:
            self.data, removed = filter_negative_volumes(data)
        else:
            self.data = data

        self.image_size = image_size
        self.augment = augment

        # Transform selection: an explicit override wins, otherwise pick
        # the train/val pipeline based on the augment flag.
        if transform is not None:
            self.transform = transform
        else:
            self.transform = get_train_transforms() if augment else get_val_transforms()

        print(f"Dataset initialized:")
        print(f"  Samples: {len(self.data)}")
        if removed > 0:
            print(f"  Filtered negative volumes: {removed}")
        print(f"  Image size: {image_size}")
        print(f"  Augmentation: {augment}")

    def __len__(self):
        return len(self.data)

    def load_and_preprocess_image(self, image_path, pixel_scale):
        """Load one image, extract its features, apply the transform.

        Args:
            image_path: path to the image file
            pixel_scale: pixel scale associated with this image

        Returns:
            img_tensor: image tensor of shape [1, H, W]
            features: list of 6 extracted features
        """
        img_array = None
        try:
            if os.path.exists(image_path):
                # Grayscale load, resize to target size, scale to [0, 1].
                pil_img = Image.open(image_path).convert('L')
                pil_img = pil_img.resize(self.image_size, Image.BILINEAR)
                img_array = np.array(pil_img).astype(np.float32) / 255.0
            else:
                print(f"Warning: Image not found - {image_path}")
        except Exception as e:
            print(f"Error loading image {image_path}: {e}")
        if img_array is None:
            # Fall back to a zero image; image_size is (W, H) while numpy
            # arrays are (H, W), hence the reversal.
            img_array = np.zeros(self.image_size[::-1], dtype=np.float32)

        # Traditional features are computed on the clean (pre-augmentation)
        # image so augmentation noise does not leak into them.
        features = extract_meniscus_features(img_array, pixel_scale)

        if self.transform:
            img_array = self.transform(img_array)

        # torch.from_numpy needs a contiguous buffer.
        img_array = np.ascontiguousarray(img_array)
        img_tensor = torch.from_numpy(img_array).float().unsqueeze(0)  # [1, H, W]

        return img_tensor, features

    def __getitem__(self, idx):
        """Fetch one sample.

        Returns:
            images: image tensor [20, 1, H, W]
            pixel_scales: pixel-scale tensor [20]
            traditional_features: mean feature tensor [6]
            meniscus_volume: volume label tensor [1]
        """
        item = self.data[idx]

        # Load every image of the sample along with its features.
        loaded = [
            self.load_and_preprocess_image(path, scale)
            for path, scale in zip(item['image_paths'], item['pixel_scales'])
        ]

        images = torch.stack([tensor for tensor, _ in loaded])  # [20, 1, H, W]

        # Average the traditional features over all images of the sample.
        mean_features = np.mean(np.array([feats for _, feats in loaded]), axis=0)

        traditional_features = torch.tensor(mean_features, dtype=torch.float32)
        pixel_scales = torch.tensor(item['pixel_scales'], dtype=torch.float32)
        meniscus_volume = torch.tensor([item['meniscus_volume']], dtype=torch.float32)

        return images, pixel_scales, traditional_features, meniscus_volume


class MeniscusDatasetMemoryOptimized(Dataset):
    """Memory-optimized dataset variant.

    Images are loaded lazily on access instead of being held in memory,
    and a small LRU cache keeps the most recently used samples. Suitable
    for large datasets or memory-constrained environments.

    Args:
        data: list of sample dicts (same schema as MeniscusDataset)
        image_size: target image size as (width, height)
        augment: whether to apply data augmentation
        cache_size: maximum number of cached samples
        filter_negative: whether to drop negative-volume samples
    """
    def __init__(self, data, image_size=(288, 96), augment=False, 
                 cache_size=100, filter_negative=True):
        # Drop negative-volume samples up front if requested.
        if filter_negative:
            self.data, removed = filter_negative_volumes(data)
        else:
            self.data = data
            removed = 0

        self.image_size = image_size
        self.augment = augment
        self.cache_size = cache_size
        # Plain dict used as an LRU cache: Python 3.7+ dicts preserve
        # insertion order, so pop+reinsert moves an entry to the end.
        self._cache = {}

        # Select the transform pipeline.
        if augment:
            self.transform = get_train_transforms()
        else:
            self.transform = get_val_transforms()

        print(f"Memory-optimized dataset initialized:")
        print(f"  Samples: {len(self.data)}")
        if removed > 0:
            print(f"  Filtered negative volumes: {removed}")
        print(f"  Cache size: {cache_size}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Serve from cache only when not augmenting: caching an augmented
        # sample would freeze its random augmentation across epochs.
        if not self.augment and idx in self._cache:
            # Move the hit to the end so eviction is true LRU, not FIFO.
            result = self._cache.pop(idx)
            self._cache[idx] = result
            return result

        item = self.data[idx]

        images = []
        all_features = []

        # Reuse MeniscusDataset's loader via an unbound call: it only reads
        # self.image_size and self.transform, both defined on this class.
        # This replaces the original per-item construction of a throwaway
        # MeniscusDataset, which printed its init banner on every access.
        for img_path, pixel_scale in zip(item['image_paths'], item['pixel_scales']):
            img_tensor, features = MeniscusDataset.load_and_preprocess_image(
                self, img_path, pixel_scale
            )
            images.append(img_tensor)
            all_features.append(features)

        images = torch.stack(images)  # [20, 1, H, W]
        mean_features = np.mean(np.array(all_features), axis=0)

        traditional_features = torch.tensor(mean_features, dtype=torch.float32)
        pixel_scales = torch.tensor(item['pixel_scales'], dtype=torch.float32)
        meniscus_volume = torch.tensor([item['meniscus_volume']], dtype=torch.float32)

        result = (images, pixel_scales, traditional_features, meniscus_volume)

        # Cache only deterministic (non-augmented) samples.
        if not self.augment:
            if len(self._cache) >= self.cache_size:
                # Evict the least recently used entry (front of the dict).
                self._cache.pop(next(iter(self._cache)))
            self._cache[idx] = result

        return result


def create_dataloaders(train_data, val_data, batch_size, num_workers, 
                      image_size, augment_train=True, filter_negative=True,
                      use_memory_optimized=False):
    """Build the training and validation data loaders.

    Args:
        train_data: list of training samples
        val_data: list of validation samples
        batch_size: batch size
        num_workers: number of worker processes
        image_size: target image size (width, height)
        augment_train: whether to augment the training set
        filter_negative: whether to drop negative-volume samples
        use_memory_optimized: whether to use the memory-optimized dataset

    Returns:
        (train_loader, val_loader)
    """
    from torch.utils.data import DataLoader

    # Build the datasets; the memory-optimized variant also takes a cache size.
    if use_memory_optimized:
        train_dataset = MeniscusDatasetMemoryOptimized(
            train_data,
            image_size=image_size,
            augment=augment_train,
            cache_size=100,
            filter_negative=filter_negative,
        )
        val_dataset = MeniscusDatasetMemoryOptimized(
            val_data,
            image_size=image_size,
            augment=False,
            cache_size=50,
            filter_negative=filter_negative,
        )
    else:
        train_dataset = MeniscusDataset(
            train_data,
            image_size=image_size,
            augment=augment_train,
            filter_negative=filter_negative,
        )
        val_dataset = MeniscusDataset(
            val_data,
            image_size=image_size,
            augment=False,
            filter_negative=filter_negative,
        )

    # Shared loader settings; only shuffling differs between the two loaders.
    loader_kwargs = dict(
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=torch.cuda.is_available(),
        drop_last=False,
    )
    train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)
    val_loader = DataLoader(val_dataset, shuffle=False, **loader_kwargs)

    print(f"\nDataLoaders created:")
    print(f"  Train batches: {len(train_loader)}")
    print(f"  Val batches: {len(val_loader)}")
    print(f"  Batch size: {batch_size}")
    print(f"  Workers: {num_workers}")

    return train_loader, val_loader


if __name__ == "__main__":
    # 测试数据集
    print("Testing dataset...")
    
    # 创建模拟数据
    test_data = []
    for i in range(10):
        volume = np.random.rand() * 20 + 5  # [5, 25]
        test_data.append({
            'image_paths': [f'img_{i}_{j}.png' for j in range(20)],
            'pixel_scales': [0.1 + np.random.rand() * 0.05 for _ in range(20)],
            'meniscus_volume': volume
        })
    
    print(f"\nTest data statistics:")
    volumes = [item['meniscus_volume'] for item in test_data]
    print(f"  Total samples: {len(volumes)}")
    print(f"  Volume range: [{min(volumes):.2f}, {max(volumes):.2f}]")
    
    # 创建数据集
    print("\nCreating dataset...")
    dataset = MeniscusDataset(test_data, image_size=(288, 96), augment=True)
    
    # 测试获取样本
    print("\nTesting sample retrieval...")
    images, scales, features, volume = dataset[0]
    
    print(f"\nSample shapes:")
    print(f"  Images: {images.shape}")
    print(f"  Scales: {scales.shape}")
    print(f"  Features: {features.shape}")
    print(f"  Volume: {volume.shape}")
    print(f"  Volume value: {volume.item():.2f}")
    
    print("\n✓ Dataset test passed!")