import torch
import yaml
import os

import nibabel as nib
import numpy as np
import monai.transforms as transforms
from torch.utils.data import DataLoader,Dataset



def reader(path: str):
    """Parse the YAML file at *path* and return the resulting object."""
    with open(path, 'r', encoding='utf-8') as handle:
        return yaml.safe_load(handle)

def get_data_config(path):
    """Thin alias over :func:`reader` for loading a data-config YAML file."""
    config = reader(path)
    return config

def open_nii(path):
    """Load a NIfTI volume and return its voxel data.

    Parameters
    ----------
    path : str
        Path to a ``.nii`` or ``.nii.gz`` file.

    Returns
    -------
    numpy.ndarray
        Floating-point image data (via nibabel's ``get_fdata``).

    Raises
    ------
    ValueError
        If *path* does not end with a NIfTI extension.
    """
    # Explicit raise instead of `assert`: asserts are stripped under `python -O`,
    # which would silently allow non-NIfTI paths through to nib.load.
    if not path.endswith(('.nii', '.nii.gz')):
        raise ValueError(
            f'open_nii expects a path ending with .nii or .nii.gz, got: {path!r}'
        )
    data = nib.load(path)
    data_array = data.get_fdata()
    return data_array

class BraTS(torch.utils.data.Dataset):
    """BraTS multi-modality MRI dataset.

    Scans ``<data_root>/<patient>/`` directories for files whose stem ends in
    a task/modality suffix (e.g. ``xxx_t1.nii.gz`` -> task ``t1``) and keeps
    only patients that provide *every* task listed in the config.

    ``__getitem__`` returns the dict produced by a MONAI dictionary-transform
    pipeline: load -> intensity scaling + center crop -> (train-only)
    augmentation -> final intensity normalization.

    Parameters
    ----------
    config_path : str
        Path to a YAML config with a ``data`` section containing
        ``image_size``, ``train_data``, ``val_data`` and ``tasks``.
    is_train : bool
        Selects the split root and enables augmentation.
    """

    def __init__(self, config_path='../configs/brats.yaml', is_train=True):
        super().__init__()
        self.config = get_data_config(config_path)['data']
        self.is_train = is_train
        self.image_size = self.config['image_size']

        # Train and validation splits live under different roots.
        if self.is_train:
            self.data_root = self.config['train_data']
        else:
            self.data_root = self.config['val_data']

        self.tasks = self.config['tasks']

        # Build one {task_name: file_path} dict per patient; drop any patient
        # that is missing at least one required modality.
        self.samples = []
        for patient in os.listdir(self.data_root):
            patient_path = os.path.join(self.data_root, patient)
            if not os.path.isdir(patient_path):
                continue

            files = os.listdir(patient_path)
            sample_dict = {}

            for file in files:
                # Naming convention assumed: <patient>_<task>.nii(.gz) — the
                # task is the last underscore-separated token of the stem.
                task_name = file.split('.')[0].split("_")[-1]
                if task_name in self.tasks:
                    sample_dict[task_name] = os.path.join(patient_path, file)

            if all(task in sample_dict for task in self.tasks):
                self.samples.append(sample_dict)

        # Read each NIfTI path into a channel-first float32 tensor.
        self.loading_transforms = transforms.Compose([
            transforms.LoadImaged(
                keys=self.tasks,
                image_only=False,
                ensure_channel_first=True,
                allow_missing_keys=True,
            ),
            transforms.EnsureTyped(keys=self.tasks, dtype=np.float32),
        ])

        # Percentile-based intensity scaling to [-1, 1], then a fixed-size
        # center crop to the configured spatial size.
        self.shape_transform_list = transforms.Compose([
            transforms.ScaleIntensityRangePercentilesd(
                keys=self.tasks, lower=0.5, upper=99.5,
                b_min=-1, b_max=1, clip=True, relative=False, channel_wise=True
            ),
            transforms.CenterSpatialCropd(
                keys=self.tasks, allow_missing_keys=True, roi_size=self.image_size
            )
        ])

        if self.is_train:  # augmentation is applied on the training split only
            self.augmentation_transforms = transforms.Compose([
                transforms.RandZoomd(
                    keys=self.tasks, allow_missing_keys=True, prob=0.2,
                    min_zoom=1.0, max_zoom=1.4,
                    # One interpolation mode per key; sized from the task list
                    # instead of a hard-coded 4-element list so configs with a
                    # different number of modalities still work.
                    mode=['trilinear'] * len(self.tasks)
                ),
                transforms.RandFlipd(
                    keys=self.tasks, allow_missing_keys=True,
                    prob=0.2, spatial_axis=[0, 1]
                ),
                transforms.RandAdjustContrastd(
                    keys=self.tasks, allow_missing_keys=True,
                    prob=0.2, gamma=[1.0, 2.0], retain_stats=True
                )
            ])
        else:  # validation/test: no augmentation
            self.augmentation_transforms = transforms.Compose([])

        # Re-normalize intensities after augmentation (contrast adjustment can
        # shift the value range outside [-1, 1]).
        self.norm_transform_list = transforms.Compose([
            transforms.ScaleIntensityRangePercentilesd(
                keys=self.tasks, lower=0.5, upper=99.5,
                b_min=-1, b_max=1, clip=True, relative=False, channel_wise=True
            )
        ])

        self.total_transforms = transforms.Compose([
            self.loading_transforms,
            self.shape_transform_list,
            self.augmentation_transforms,
            self.norm_transform_list,
        ])

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        """Return the transformed sample at *index*.

        If the transform pipeline fails (e.g. a corrupt file), fall back to
        the next sample. Unlike the previous unbounded-recursion retry, this
        makes at most one full pass over the dataset and then raises
        RuntimeError, so a dataset where every sample is broken cannot
        recurse forever.
        """
        num_samples = len(self.samples)
        for attempt in range(num_samples):
            candidate = (index + attempt) % num_samples
            try:
                return self.total_transforms(self.samples[candidate])
            except Exception as e:  # best-effort skip of unreadable samples
                print(f"Error transforming sample {candidate}: {e}")
        raise RuntimeError('every sample in the dataset failed to transform')

def get_dataloader(data_name, config_path, is_train=True, batch_size=8, shuffle=True, num_workers=8, pin_memory=True, drop_last=True):
    """Build a DataLoader for the named dataset.

    Parameters
    ----------
    data_name : str
        Dataset identifier; only ``'brats'`` is supported.
    config_path : str
        Path to the dataset YAML config (forwarded to :class:`BraTS`).
    is_train : bool
        Selects the train/val split and augmentation behavior.
    batch_size, shuffle, num_workers, pin_memory, drop_last
        Forwarded to :class:`torch.utils.data.DataLoader`.

    Returns
    -------
    torch.utils.data.DataLoader
        A single loader over the requested split.

    Raises
    ------
    ValueError
        If *data_name* is not a supported dataset. (Raised explicitly rather
        than via ``assert``, which is stripped under ``python -O``.)
    """
    if data_name != 'brats':
        raise ValueError(f'unknown dataset name: {data_name!r}')

    # `dataset` instead of `set`: avoid shadowing the builtin.
    dataset = BraTS(config_path, is_train=is_train)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        drop_last=drop_last,
    )
    return loader



if __name__ == '__main__':
    # Smoke test: print the type and per-task tensor shapes of one batch.
    # NOTE: get_dataloader returns a SINGLE DataLoader — the previous code
    # unpacked it into (train_loader, val_loader), which raised at runtime.
    loader = get_dataloader(
        'brats',
        '/media/bmp1/store61/undergraduate_achieve/2022/WHF/mmm/configs/brats.yaml',
        batch_size=2,
    )
    for data in loader:
        print(type(data))
        for task, d in data.items():
            print(task, d.shape)
        break

