import glob
import os
import shutil

import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader

class VentricleDataset(Dataset):
    """Ventricle segmentation dataset.

    All volumes are preprocessed up front and cached as individual 2D
    slices, so ``__getitem__`` is a cheap cache lookup plus (for training)
    on-the-fly augmentation.
    """

    def __init__(self, image_dir, mask_dir, preprocessor, transform=None, mode='train'):
        """
        Args:
            image_dir: directory containing raw image volumes (*.nii.gz)
            mask_dir: directory containing annotation volumes (*.nii.gz)
            preprocessor: object exposing
                ``preprocess_volume(img_path, mask_path) -> (slices, masks, metadata)``;
                metadata must contain a 'valid_indices' sequence
            transform: augmentation callable ``(image, mask) -> (image, mask)``;
                only applied when mode == 'train'
            mode: 'train', 'val' or 'test'

        Raises:
            ValueError: if image and mask file counts differ.
        """
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.preprocessor = preprocessor
        self.transform = transform
        self.mode = mode

        # Pair image/mask volumes by sorted filename.
        self.image_files = sorted(glob.glob(os.path.join(image_dir, '*.nii.gz')))
        self.mask_files = sorted(glob.glob(os.path.join(mask_dir, '*.nii.gz')))

        # Explicit exception instead of `assert`: asserts are silently
        # stripped when Python runs with -O, removing the validation.
        if len(self.image_files) != len(self.mask_files):
            raise ValueError("图像和标注数量不匹配")

        # Preprocess everything once and cache slice-level samples.
        self.data_cache = []
        self._preprocess_all_data()

    def _preprocess_all_data(self):
        """Preprocess every volume and cache one entry per valid 2D slice."""
        print(f"预处理{self.mode}数据集...")

        for img_path, mask_path in zip(self.image_files, self.mask_files):
            # The sorted lists only pair correctly when names match exactly.
            img_name = os.path.basename(img_path)
            mask_name = os.path.basename(mask_path)
            if img_name != mask_name:
                raise ValueError(f"文件名不匹配: {img_name} vs {mask_name}")

            slices, masks, metadata = self.preprocessor.preprocess_volume(img_path, mask_path)

            # One cache entry (= one sample) per slice.
            for i in range(len(slices)):
                self.data_cache.append({
                    'image': slices[i],
                    # A missing mask is stored as None and replaced with an
                    # all-zero mask at read time (see __getitem__).
                    'mask': masks[i] if len(masks) > i else None,
                    'filename': img_name,
                    'slice_idx': metadata['valid_indices'][i],
                    'metadata': metadata
                })

        print(f"完成预预处理，共{len(self.data_cache)}个切片" if False else f"完成预处理，共{len(self.data_cache)}个切片")

    def __len__(self):
        return len(self.data_cache)

    def __getitem__(self, idx):
        """Return one slice as a dict of (1, H, W) float tensors plus bookkeeping."""
        sample = self.data_cache[idx]
        image = sample['image']
        mask = sample['mask']

        # Bug fix: a None mask (volume with fewer masks than slices) used to
        # crash in np.expand_dims/torch.from_numpy below; substitute an
        # all-zero mask with the image's spatial shape.
        if mask is None:
            mask = np.zeros_like(image)

        # Augment only while training.
        if self.transform and self.mode == 'train':
            image, mask = self.transform(image, mask)

        # Add channel dimension: (H, W) -> (1, H, W).
        image = np.expand_dims(image, axis=0)
        mask = np.expand_dims(mask, axis=0)

        # copy() guarantees a contiguous, positively-strided array, which
        # torch.from_numpy requires (random flips can yield negative strides).
        image = torch.from_numpy(image.copy()).float()
        mask = torch.from_numpy(mask.copy()).float()

        return {
            'image': image,
            'mask': mask,
            'filename': sample['filename'],
            'slice_idx': sample['slice_idx']
        }


# 定义数据增强函数为顶层函数，使其可以被pickle
def train_augmentation(image, mask):
    """Training-time augmentation: random flip then random intensity shift.

    Kept as a module-level function (not a lambda/closure) so that
    DataLoader worker processes can pickle it.
    """
    from preprocessing import DataAugmentation

    flipped_image, flipped_mask = DataAugmentation.random_flip(image, mask)
    shifted_image = DataAugmentation.random_intensity_shift(flipped_image)
    return shifted_image, flipped_mask


class DataLoaderManager:
    """Builds patient-level train/val/test splits and their DataLoaders."""

    def __init__(self, config):
        self.config = config
        self.preprocessor = None
        self.train_loader = None
        self.val_loader = None
        self.test_loader = None

    def _split_test_patients(self, image_dir, train_ratio, val_ratio):
        """First-level split: return (train_val_patients, test_patients).

        Centralized so prepare_data_loaders and prepare_test_loader_only
        always reproduce exactly the same test set.

        Bug fixes vs. the original:
        - `sorted(set(...))` instead of `list(set(...))`: set iteration order
          for strings varies between interpreter runs (hash randomization),
          so a fixed random_state alone did NOT make the split reproducible.
        - The test fraction is `1 - train_ratio - val_ratio` (e.g. 0.15 with
          the defaults), not `1 - train_ratio` (which gave the test set 30%).
        """
        all_images = sorted(glob.glob(os.path.join(image_dir, '*.nii.gz')))
        patient_ids = [os.path.basename(f).split('.')[0] for f in all_images]
        unique_patients = sorted(set(patient_ids))

        test_ratio = 1.0 - train_ratio - val_ratio
        return train_test_split(
            unique_patients, test_size=test_ratio, random_state=42
        )

    def prepare_data_loaders(self, image_dir, mask_dir, preprocessor,
                           train_ratio=0.7, val_ratio=0.15, batch_size=16):
        """Prepare train, validation and test DataLoaders.

        The split is done per patient (not per slice) to avoid slices of the
        same volume leaking across splits.

        Returns:
            (train_loader, val_loader, test_loader)
        """
        self.preprocessor = preprocessor

        # Deterministic patient-level split (see _split_test_patients).
        train_val_patients, test_patients = self._split_test_patients(
            image_dir, train_ratio, val_ratio
        )

        # Second-level split: carve the validation set out of the remaining
        # patients so the final fractions are train_ratio / val_ratio / rest.
        train_patients, val_patients = train_test_split(
            train_val_patients,
            test_size=val_ratio / (train_ratio + val_ratio),
            random_state=42
        )

        # Build the three datasets (each preprocesses and caches its slices).
        train_dataset = self._create_dataset(
            image_dir, mask_dir, train_patients, preprocessor, mode='train'
        )
        val_dataset = self._create_dataset(
            image_dir, mask_dir, val_patients, preprocessor, mode='val'
        )
        test_dataset = self._create_dataset(
            image_dir, mask_dir, test_patients, preprocessor, mode='test'
        )

        # Only the training loader is shuffled; test uses batch_size=1 so
        # per-volume evaluation stays simple.
        self.train_loader = DataLoader(
            train_dataset, batch_size=batch_size, shuffle=True, num_workers=4
        )
        self.val_loader = DataLoader(
            val_dataset, batch_size=batch_size, shuffle=False, num_workers=4
        )
        self.test_loader = DataLoader(
            test_dataset, batch_size=1, shuffle=False, num_workers=4
        )

        print(f"数据集划分完成:")
        print(f"训练集: {len(train_dataset)} 切片 ({len(train_patients)} 病人)")
        print(f"验证集: {len(val_dataset)} 切片 ({len(val_patients)} 病人)")
        print(f"测试集: {len(test_dataset)} 切片 ({len(test_patients)} 病人)")

        return self.train_loader, self.val_loader, self.test_loader

    def prepare_test_loader_only(self, image_dir, mask_dir, preprocessor,
                              train_ratio=0.7, val_ratio=0.15, batch_size=1):
        """Prepare only the test DataLoader, skipping train/val preprocessing.

        Uses the same deterministic first-level split as prepare_data_loaders,
        so the test set is identical to the one used during training.
        """
        self.preprocessor = preprocessor

        _, test_patients = self._split_test_patients(
            image_dir, train_ratio, val_ratio
        )

        test_dataset = self._create_dataset(
            image_dir, mask_dir, test_patients, preprocessor, mode='test'
        )

        self.test_loader = DataLoader(
            test_dataset, batch_size=batch_size, shuffle=False, num_workers=4
        )

        print(f"测试数据集准备完成:")
        print(f"测试集: {len(test_dataset)} 切片 ({len(test_patients)} 病人)")

        return self.test_loader

    def _create_dataset(self, image_dir, mask_dir, patient_list, preprocessor, mode):
        """Create a dataset restricted to `patient_list` via a temp directory."""
        temp_image_dir = os.path.join('run', 'temp', mode, 'images')
        temp_mask_dir = os.path.join('run', 'temp', mode, 'masks')

        # Remove stale files from a previous run first: VentricleDataset
        # globs the whole temp directory, so leftovers from a different
        # split would silently leak patients across splits.
        for d in (temp_image_dir, temp_mask_dir):
            if os.path.isdir(d):
                shutil.rmtree(d)
            os.makedirs(d, exist_ok=True)

        # Materialize the selected patients into the temp directories.
        for patient_id in patient_list:
            img_path = os.path.join(image_dir, f"{patient_id}.nii.gz")
            mask_path = os.path.join(mask_dir, f"{patient_id}.nii.gz")

            if not (os.path.exists(img_path) and os.path.exists(mask_path)):
                continue

            for src, dst_dir in ((img_path, temp_image_dir), (mask_path, temp_mask_dir)):
                dst = os.path.join(dst_dir, f"{patient_id}.nii.gz")
                try:
                    # Hard links are cheaper than copies; fall back to a copy
                    # where linking is unsupported (e.g. cross-device, some
                    # Windows filesystems).
                    os.link(os.path.abspath(src), dst)
                except (OSError, AttributeError):
                    shutil.copy2(src, dst)

        return VentricleDataset(
            temp_image_dir, temp_mask_dir, preprocessor,
            transform=self._get_augmentation(mode), mode=mode
        )

    def _get_augmentation(self, mode):
        """Return the augmentation callable for `mode` (None outside training)."""
        if mode == 'train':
            return train_augmentation
        return None

    def get_sample_batch(self, num_samples=4):
        """Collect up to `num_samples` training batches for visualization."""
        samples = []
        for i, batch in enumerate(self.train_loader):
            if i >= num_samples:
                break
            samples.append(batch)
        return samples