import os
from collections import OrderedDict

import nibabel
import numpy as np
import torch

class CRCDataset(torch.utils.data.Dataset):
    """Slice-level dataset over a directory of 3-D NIfTI volumes.

    Expected layout::

        root_dir/images/<pid>_*.nii.gz             # T2 image volumes
        root_dir/masks/<pid>_T2_axi_roi.nii.gz     # ROI masks (training mode)

    Each dataset index maps to one 2-D slice of one case.  Decoded volumes
    are held in a small per-process LRU cache so consecutive slices of the
    same case do not re-read the file from disk.

    Returns per item:
        training mode: ``(img_tensor, mask_tensor, virtual_path)`` where the
            mask is binarized to {0, 1};
        test mode: ``(img_tensor, img_tensor, virtual_path)`` — no ground
            truth exists, so the image is returned twice to keep the tuple
            shape identical to training mode.
    """

    def __init__(self, root_dir, transform=None, test_flag=False,
                 mask_suffix='_T2_axi_roi.nii.gz', cache_size=8):
        """Scan ``root_dir`` and build the global slice index.

        Args:
            root_dir: Dataset root containing ``images/`` (and ``masks/``).
            transform: Optional callable applied to each slice tensor.
            test_flag: When True, masks are neither required nor loaded.
            mask_suffix: Filename suffix appended to the patient id to
                locate a case's mask (kept as a parameter instead of a
                hard-coded constant).
            cache_size: Number of decoded volumes kept in the LRU cache.
        """
        super().__init__()
        self.root_dir = root_dir
        self.transform = transform
        self.test_flag = test_flag
        self.mask_suffix = mask_suffix

        self.image_dir = os.path.join(root_dir, 'images')
        self.mask_dir = os.path.join(root_dir, 'masks') if not test_flag else None

        # Per-case metadata, and the flat (case_idx, slice_idx) table that
        # maps a global dataset index onto one slice of one volume.
        self.case_list = []
        self.slice_indices = []
        self._scan_cases()

        # LRU cache of fully-decoded volumes.  OrderedDict gives O(1)
        # recency updates (move_to_end) and eviction (popitem); most
        # recently used entries live at the end.
        self.cache = OrderedDict()
        self.max_cache_size = cache_size

    def _scan_cases(self):
        """Collect case metadata and extend the global slice index.

        Training-mode cases without a matching mask file are skipped.
        """
        for img_file in sorted(os.listdir(self.image_dir)):
            if not img_file.endswith('.nii.gz'):
                continue

            # Patient id is the filename prefix before the first underscore.
            pid = img_file.split('_')[0]
            img_path = os.path.join(self.image_dir, img_file)

            mask_path = None
            if not self.test_flag:
                mask_path = os.path.join(self.mask_dir, f"{pid}{self.mask_suffix}")
                if not os.path.exists(mask_path):
                    continue  # no annotation for this case

            # Read only the header to get the slice count — no voxel data
            # is decoded here, keeping construction cheap.
            img_nii = nibabel.load(img_path)
            # Slices are assumed to lie on axis 2 (axial) — TODO confirm
            # against the acquisition orientation.
            num_slices = img_nii.header.get_data_shape()[2]
            img_nii.uncache()  # release any internal buffers

            case_idx = len(self.case_list)
            self.case_list.append({
                'pid': pid,
                'img_path': img_path,
                'mask_path': mask_path,
                'num_slices': num_slices,
            })
            self.slice_indices.extend((case_idx, s) for s in range(num_slices))

    def __len__(self):
        """Total number of 2-D slices across all cases."""
        return len(self.slice_indices)

    def _load_case_data(self, case):
        """Decode one case's volume (and mask in training mode) from disk.

        Volumes are materialized as float32: nibabel's default is float64,
        which would double the cache's memory footprint for data that is
        converted to float32 tensors anyway.
        """
        img_data = nibabel.load(case['img_path']).get_fdata(dtype=np.float32)
        mask_data = None
        if not self.test_flag:
            mask_data = nibabel.load(case['mask_path']).get_fdata(dtype=np.float32)
        return img_data, mask_data

    def _get_volumes(self, case_idx):
        """Return ``(img_data, mask_data)`` for a case through the LRU cache."""
        if case_idx in self.cache:
            self.cache.move_to_end(case_idx)  # mark as most recently used
        else:
            self.cache[case_idx] = self._load_case_data(self.case_list[case_idx])
            while len(self.cache) > self.max_cache_size:
                self.cache.popitem(last=False)  # evict least recently used
        return self.cache[case_idx]

    def __getitem__(self, idx):
        """Return one slice (and its mask in training mode) as tensors."""
        case_idx, slice_idx = self.slice_indices[idx]
        case = self.case_list[case_idx]
        img_data, mask_data = self._get_volumes(case_idx)

        # .copy() detaches the 2-D slice from the cached 3-D array so the
        # tensor does not pin the whole volume (and transforms cannot
        # mutate the cache).
        img_slice = img_data[:, :, slice_idx].copy()
        img_tensor = torch.tensor(img_slice, dtype=torch.float32).unsqueeze(0)
        virtual_path = f"{case['pid']}_slice{slice_idx:03d}"

        if self.test_flag:
            if self.transform:
                img_tensor = self.transform(img_tensor)
            # No ground truth in test mode: image doubles as the second item.
            return (img_tensor, img_tensor, virtual_path)

        mask_slice = mask_data[:, :, slice_idx].copy()
        mask_tensor = torch.tensor(mask_slice, dtype=torch.float32).unsqueeze(0)
        mask_tensor = torch.where(mask_tensor > 0, 1.0, 0.0)  # binarize ROI

        if self.transform:
            # Replay the same torch RNG state for image and mask so random
            # augmentations stay spatially aligned.
            # NOTE(review): this only synchronizes torch's RNG; transforms
            # drawing from `random` or numpy would desynchronize — confirm
            # the transform pipeline uses torch randomness.
            state = torch.get_rng_state()
            img_tensor = self.transform(img_tensor)
            torch.set_rng_state(state)
            mask_tensor = self.transform(mask_tensor)

        return (img_tensor, mask_tensor, virtual_path)