import SimpleITK as sitk 
import numpy as np 
import torch 
from monai import transforms
import glob 
from torch.utils.data import Dataset, DataLoader

def resample_img(
    image: sitk.Image,
    out_spacing=(2.0, 2.0, 2.0),
    out_size=None,
    is_label: bool = False,
    pad_value=0.,
) -> sitk.Image:
    """Resample an image to a target voxel spacing.

    Ref: SimpleITK resampling examples.

    Args:
        image: input SimpleITK image.
        out_spacing: target spacing given in our (z, y, x) convention; it is
            reversed internally to SimpleITK's (x, y, z) convention.
        out_size: explicit output size in voxels (SimpleITK x, y, z order).
            Computed from the spacing ratio when None so the physical
            extent of the volume is preserved.
        is_label: if True, use nearest-neighbour interpolation so label
            values are never blended; otherwise use B-spline.
        pad_value: fill value for voxels that fall outside the input volume.
            When None, the minimum intensity of the input image is used.

    Returns:
        The resampled sitk.Image.
    """
    # get original spacing and size (SimpleITK order: x, y, z)
    original_spacing = image.GetSpacing()
    original_size = image.GetSize()

    # convert our z, y, x convention to SimpleITK's x, y, z convention
    out_spacing = list(out_spacing)[::-1]

    if out_size is None:
        # keep physical extent constant: new_size = old_size * old/new spacing
        out_size = [
            int(np.round(size * (spacing_in / spacing_out)))
            for size, spacing_in, spacing_out in zip(original_size, original_spacing, out_spacing)
        ]
    else:
        # SetSize() rejects numpy integer types; force native Python ints
        out_size = [int(s) for s in out_size]

    # determine pad value
    if pad_value is None:
        # BUG FIX: the original used image.GetPixelIDValue(), which returns
        # the pixel *type* enum (e.g. 8 for float32), not an intensity.
        # Pad with the image minimum instead (typical for CT backgrounds).
        pad_value = float(sitk.GetArrayViewFromImage(image).min())

    # set up resampler
    resample = sitk.ResampleImageFilter()
    resample.SetOutputSpacing(list(out_spacing))
    resample.SetSize(out_size)
    resample.SetOutputDirection(image.GetDirection())
    resample.SetOutputOrigin(image.GetOrigin())
    resample.SetTransform(sitk.Transform())
    resample.SetDefaultPixelValue(pad_value)
    # labels must not be interpolated (blending would invent new label values)
    if is_label:
        resample.SetInterpolator(sitk.sitkNearestNeighbor)
    else:
        resample.SetInterpolator(sitk.sitkBSpline)

    # perform resampling
    return resample.Execute(image)

# Training image inventories, collected with glob from fixed mount points.
# NOTE(review): these paths are machine-specific; glob returns an empty
# list silently when a mount is missing — verify counts before training.
atm22_paths = glob.glob("/mnt/xingzhaohu/data/ATM22_1/imagesTr/*.nii.gz") + \
            glob.glob("/mnt/xingzhaohu/data/TrainBatch2_new/imagesTr/*.nii.gz")
luna16_paths = glob.glob("/mnt/xingzhaohu/data/luna16_convert/*.nii.gz")

covid19_paths = glob.glob("/home/xingzhaohu/sharefs/datasets/COVID-19-20_v2/*/*_ct.nii.gz")

flare21_paths = glob.glob("/mnt/xingzhaohu/data/FLARE2021/*.nii.gz") + \
             glob.glob("/mnt/xingzhaohu/data/FLARE2021/ValidationImg/*.nii.gz")

def build_ATM22():
    """Wrap each ATM22 image path in an {"image": path} record."""
    return [{"image": path} for path in atm22_paths]

def build_covid19():
    """Wrap each COVID-19-20 CT path in an {"image": path} record."""
    return [{"image": path} for path in covid19_paths]

# Backward-compatible alias: the original function name was misspelled
# ("bulid") and is still referenced by callers such as get_loader().
bulid_covid19 = build_covid19

def build_flare2021():
    """Wrap each FLARE2021 image path in an {"image": path} record."""
    return [{"image": path} for path in flare21_paths]

def build_luna16():
    """Wrap each Luna16 image path in an {"image": path} record."""
    return [{"image": path} for path in luna16_paths]

def random_selected(data: list, n):
    """Randomly move up to ``n`` items out of ``data`` into a validation list.

    ``data`` is mutated in place (selected items are popped from it) and is
    also returned for convenience.

    Args:
        data: pool of samples; shrunk in place by the items drawn.
        n: number of validation samples to draw. Clamped to ``len(data)``
           (the original raised ValueError once the pool was exhausted).

    Returns:
        Tuple ``(data, val_data)`` — the remaining training pool and the
        drawn validation samples.
    """
    import random

    # guard against n > len(data): randint(0, -1) would raise ValueError
    n = min(n, len(data))
    val_data = []
    for _ in range(n):
        # randrange(k) is equivalent to randint(0, k-1)
        idx = random.randrange(len(data))
        val_data.append(data.pop(idx))

    return data, val_data

class PretrainDataset(Dataset):
    """Dataset of single-channel CT volumes for self-supervised pretraining.

    Each element of ``datalist`` is a dict holding a NIfTI file path under
    the "image" key. Volumes are resampled to 1.5 mm isotropic spacing and
    returned as float32 numpy arrays with a leading channel axis.
    """

    def __init__(self, datalist, transform=None, cache=False) -> None:
        """
        Args:
            datalist: list of {"image": path} records.
            transform: optional callable applied to each loaded volume.
            cache: if True, eagerly decode every volume into memory once
                (trades RAM for per-epoch I/O speed).
        """
        super().__init__()
        self.transform = transform
        self.datalist = datalist
        self.cache = cache
        if cache:
            self.cache_data = [self.read_image(item) for item in datalist]

    def __getitem__(self, index):
        if self.cache:
            image = self.cache_data[index]
        else:
            image = self.read_image(self.datalist[index])
        if self.transform is not None:
            image = self.transform(image)

        return image

    @classmethod
    def read_image(cls, data):
        """Load one volume, resample to 1.5 mm isotropic spacing and return
        a float32 array shaped (1, D, H, W).

        Note: the original declared this @classmethod with its first
        parameter named ``self``; renamed to ``cls`` for correctness.
        """
        image = sitk.ReadImage(data["image"])
        image = resample_img(image, out_spacing=[1.5, 1.5, 1.5])

        image = sitk.GetArrayFromImage(image).astype(np.float32)
        if image.ndim == 3:
            # add the channel axis expected by the downstream transforms
            image = np.expand_dims(image, axis=0)
        return image

    def __len__(self):
        return len(self.datalist)

def get_loader(batch_size, train_cache, val_cache):
    """Build the training and validation DataLoaders for pretraining.

    Args:
        batch_size: batch size for both loaders.
        train_cache: cache all training volumes in memory (see PretrainDataset).
        val_cache: cache all validation volumes in memory.

    Returns:
        Tuple ``(train_loader, val_loader)``.

    Note: the original constructed both DataLoaders and then returned the
    *datasets*, leaving the loaders dead — fixed to return the loaders, as
    the function name and construction indicate was intended.
    """
    datalist1 = bulid_covid19()
    datalist2 = build_ATM22()
    datalist3 = build_flare2021()
    datalist4 = build_luna16()

    num_workers = 8
    print("Dataset 1 covid-19: number of data: {}".format(len(datalist1)))
    print("Dataset 2 ATM22: number of data: {}".format(len(datalist2)))
    print("Dataset 3 FLARE21: number of data: {}".format(len(datalist3)))
    print("Dataset 4 Luna16: number of data: {}".format(len(datalist4)))

    # NOTE(review): only the COVID-19 list is actually used for training;
    # ATM22/FLARE21/Luna16 are built for the counts above only — confirm
    # whether they were meant to be concatenated here.
    datalist = datalist1
    print("Dataset all training: number of data: {}".format(len(datalist)))

    # hold out 16 randomly chosen volumes for validation
    train_data, val_data = random_selected(datalist, 16)

    print(f"training data is {len(train_data)}, validation data is {len(val_data)}")

    spatial_size = [224, 224, 224]
    train_transforms = transforms.Compose(
        [
            # map HU range [-1000, 1000] to [0, 1]
            transforms.ScaleIntensityRange(a_min=-1000, a_max=1000, b_min=0.0, b_max=1.0, clip=True),
            # pad-then-crop guarantees a fixed spatial size for batching
            transforms.SpatialPad(spatial_size=spatial_size),
            transforms.CenterSpatialCrop(roi_size=spatial_size),
            transforms.ToTensor(),
        ]
    )

    val_transforms = transforms.Compose(
        [
            transforms.ScaleIntensityRange(a_min=-1000, a_max=1000, b_min=0.0, b_max=1.0, clip=True),
            transforms.SpatialPad(spatial_size=spatial_size),
            transforms.CenterSpatialCrop(roi_size=spatial_size),
            transforms.ToTensor(),
        ]
    )

    train_ds = PretrainDataset(train_data, transform=train_transforms, cache=train_cache)

    # shuffle=True so sample order varies across epochs (it was missing)
    train_loader = DataLoader(
        train_ds, batch_size=batch_size, num_workers=num_workers, shuffle=True, drop_last=True
    )

    val_ds = PretrainDataset(val_data, transform=val_transforms, cache=val_cache)

    val_loader = DataLoader(
        val_ds, batch_size=batch_size, num_workers=num_workers, drop_last=True, shuffle=False
    )

    return train_loader, val_loader