# Copyright 2020 - 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.model_selection import KFold  # K-fold cross-validation
import pickle
import os
import json
import math
import numpy as np
import torch
from monai import transforms, data
import SimpleITK as sitk
from tqdm import tqdm 
from torch.utils.data import Dataset 
import glob 

class MedicalDataset(Dataset):
    """Dataset over preprocessed ``.npz`` volumes with companion ``.pkl`` metadata.

    Each case is stored as ``<case>.npz`` holding arrays under the ``data``
    (image) and ``seg`` (segmentation) keys, plus a sibling ``<case>.pkl``
    containing a pickled properties object.
    """

    def __init__(self, data_dir, transform=None) -> None:
        """Collect every ``.npz`` file found directly under *data_dir*.

        Args:
            data_dir: directory containing the ``.npz``/``.pkl`` pairs.
            transform: optional callable applied to each loaded sample dict.
        """
        super().__init__()

        self.datalist = glob.glob(f"{data_dir}/*.npz")
        self.transform = transform
        print(f"data length is {len(self.datalist)}")

    def read_data(self, data_path):
        """Load one case from disk.

        Args:
            data_path: path to a ``.npz`` file with ``data`` and ``seg`` arrays.

        Returns:
            dict with keys ``data`` (image array), ``seg`` (label array) and
            ``properties`` (unpickled metadata from the sibling ``.pkl`` file).
        """
        arrays = np.load(data_path)

        image_data = arrays["data"]
        seg_data = arrays["seg"]

        # Strip the ".npz" suffix to locate the companion pickle file.
        properties_path = f"{data_path[:-4]}.pkl"

        # Bug fix: the file handle was previously opened with a bare open()
        # and never closed; use a context manager so it is always released.
        # NOTE: pickle.load is unsafe on untrusted input — these files are
        # assumed to be locally produced preprocessing artifacts.
        with open(properties_path, "rb") as fh:
            info = pickle.load(fh)

        return {
            "data": image_data,
            "seg": seg_data,
            "properties": info,
        }

    def __getitem__(self, i):
        """Return the (optionally transformed) sample dict at index *i*."""
        sample = self.read_data(self.datalist[i])

        if self.transform is not None:
            sample = self.transform(sample)

        return sample

    def __len__(self):
        """Number of ``.npz`` cases discovered in the data directory."""
        return len(self.datalist)

def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Partition *data_paths* into K cross-validation folds.

    Args:
        data_paths: sequence of dataset file paths to split.
        n_splits: number of folds K.
        shuffle: whether KFold shuffles the indices before splitting.

    Returns:
        list of ``n_splits`` dicts, each with keys ``"train_data"`` and
        ``"val_data"`` holding disjoint sublists of *data_paths*.
    """
    indices = np.arange(len(data_paths))
    kfold = KFold(n_splits=n_splits, shuffle=shuffle)

    folds = []
    for train_idx, val_idx in kfold.split(indices):
        # Comprehensions replace the original manual append loops.
        folds.append(
            {
                "train_data": [data_paths[i] for i in train_idx],
                "val_data": [data_paths[j] for j in val_idx],
            }
        )

    return folds


def get_loader(data_dir):
    """Build the train/test ``MedicalDataset`` objects with MONAI pipelines.

    Args:
        data_dir: root directory containing ``train`` and ``test`` subfolders
            of preprocessed ``.npz``/``.pkl`` cases.

    Returns:
        A 3-element list ``[train_ds, test_ds, test_ds]`` — the test dataset
        also fills the validation slot.
    """
    train_dir = os.path.join(data_dir, "train")
    test_dir = os.path.join(data_dir, "test")

    both_keys = ["data", "seg"]

    # Augmentation pipeline used during training: intensity windowing,
    # foreground crop, random patch sampling, flips/zoom/rotation and
    # light intensity jitter.
    train_steps = [
        transforms.ScaleIntensityRanged(
            keys=["data"], a_min=-175, a_max=250.0, b_min=0, b_max=1.0, clip=True
        ),
        transforms.CropForegroundd(keys=both_keys, source_key="data"),
        transforms.RandCropByPosNegLabeld(
            keys=both_keys,
            label_key="seg",
            spatial_size=[64, 192, 160],
            pos=1,
            neg=1,
            num_samples=2,
            image_key="data",
            image_threshold=0,
        ),
        transforms.SpatialPadd(keys=both_keys, spatial_size=[64, 192, 160]),
        transforms.RandFlipd(keys=both_keys, prob=0.2, spatial_axis=0),
        transforms.RandFlipd(keys=both_keys, prob=0.2, spatial_axis=1),
        transforms.RandFlipd(keys=both_keys, prob=0.2, spatial_axis=2),
        transforms.RandZoomd(both_keys, prob=0.1),
        transforms.RandRotated(both_keys, 15, 15, 15, 0.1),
        transforms.RandScaleIntensityd(keys="data", factors=0.1, prob=0.1),
        transforms.RandShiftIntensityd(keys="data", offsets=0.1, prob=0.1),
        transforms.ToTensord(keys=both_keys),
    ]

    # Deterministic pipeline for evaluation: same windowing and crop,
    # no random augmentation.
    eval_steps = [
        transforms.ScaleIntensityRanged(
            keys=["data"], a_min=-175, a_max=250.0, b_min=0, b_max=1.0, clip=True
        ),
        transforms.CropForegroundd(keys=both_keys, source_key="data"),
        transforms.ToTensord(keys=both_keys),
    ]

    train_ds = MedicalDataset(train_dir, transform=transforms.Compose(train_steps))
    test_ds = MedicalDataset(test_dir, transform=transforms.Compose(eval_steps))

    # The validation slot deliberately reuses the test dataset.
    return [train_ds, test_ds, test_ds]


def test_get_loader(data_dir):
    """Build train/test datasets from an MSD-style directory layout.

    Args:
        data_dir: root directory containing ``imagesTr`` and ``imagesTs``
            subfolders of ``.npz``/``.pkl`` cases.

    Returns:
        ``[train_ds, test_ds, test_ds]`` — the test dataset also fills the
        validation slot, mirroring ``get_loader``.
    """
    train_image_dir = os.path.join(data_dir, "imagesTr")
    test_image_dir = os.path.join(data_dir, "imagesTs")

    # NOTE(review): these pipelines address keys "image"/"label", but
    # MedicalDataset yields sample dicts keyed "data"/"seg" — confirm which
    # dataset class this loader is meant to pair with.
    train_transform = transforms.Compose(
        [
            transforms.CropForegroundd(keys=["image", "label"], source_key="image"),
            transforms.RandFlipd(keys=["image", "label"], prob=0.2, spatial_axis=0),
            transforms.RandFlipd(keys=["image", "label"], prob=0.2, spatial_axis=1),
            transforms.RandFlipd(keys=["image", "label"], prob=0.2, spatial_axis=2),
            transforms.RandScaleIntensityd(keys="image", factors=0.1, prob=0.1),
            transforms.RandShiftIntensityd(keys="image", offsets=0.1, prob=0.1),
            transforms.ToTensord(keys=["image", "label"]),
        ]
    )
    val_transform = transforms.Compose(
        [
            transforms.CropForegroundd(keys=["image", "label"], source_key="image"),
            transforms.ToTensord(keys=["image", "label"]),
        ]
    )

    # Bug fix: MedicalDataset.__init__ takes a single data directory.
    # The original passed (image_dir, label_dir, transform=...), which raises
    # TypeError ("got multiple values for argument 'transform'") at runtime.
    train_ds = MedicalDataset(train_image_dir, transform=train_transform)
    test_ds = MedicalDataset(test_image_dir, transform=val_transform)

    return [train_ds, test_ds, test_ds]
