from os.path import splitext
from os import listdir
import numpy as np
import glob
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import logging
from PIL import Image
import matplotlib.pyplot as plt 
from slice_builder import SliceBuilder
from augment import transforms
from augment.transforms import Transformer
import os 
from itertools import chain
import h5py
import random
import collections

## All data-related configuration lives here.

#### cell-segmentation dataset configuration
# patch_shape = (32, 64, 64)
# stride_shape = (8, 16, 16)
# num_workers = 1
# batch_size = 1
# # ## own dataset configuration 4: tumor
patch_shape = (8, 128, 128)
stride_shape = (4, 32, 32)
# stride_shape = (8, 64, 64)
num_workers = 1
batch_size = 1
# # # ## own dataset configuration 4: edema
# patch_shape = (8, 256, 256)
# stride_shape = (4, 64, 64)
# # stride_shape = (8, 64, 64)
# num_workers = 1
# batch_size = 1


## 使用于 一张图片为一个文件夹然后里面分为mask和images。
## 这个主要用于2D
## Used for a layout where each image has its own folder split into mask and images.
## This one is mainly for 2D.
class BasicDataset(Dataset):
    """2D segmentation dataset.

    Expects one subfolder per sample id under ``data_dir``, each containing
    ``images/<id>.png`` and ``save_masks/<id>.png``.
    """

    def __init__(self, data_dir, img_size=(256, 256)):
        """
        :param data_dir: root directory; every non-hidden entry is one sample folder
        :param img_size: (width, height) every image and mask is resized to
        """
        self.data_dir = data_dir
        self.img_size = img_size
        # each non-hidden entry in data_dir is one sample id
        self.ids = [file for file in listdir(data_dir)
                    if not file.startswith('.')]
        logging.info(f'Creating dataset with {len(self.ids)} examples')

    def __len__(self):
        return len(self.ids)

    @classmethod
    def preprocess(cls, pil_img, img_size):
        """Resize a PIL image and return a float ndarray of shape (1, C, H, W).

        8-bit inputs (max value > 1) are scaled to [0, 1]; the leading extra
        dimension is the depth axis expected by a 3D U-Net.
        """
        # pil_img comes from Image.open()
        pil_img = pil_img.resize(img_size)

        img_nd = np.array(pil_img)

        # grayscale image: add the channel dimension
        if img_nd.ndim == 2:
            img_nd = np.expand_dims(img_nd, axis=2)

        # HWC to CHW
        img_trans = img_nd.transpose((2, 0, 1))
        # scale 8-bit data to [0, 1]; already-normalized data is left unchanged
        if img_trans.max() > 1:
            img_trans = img_trans / 255

        # 3D U-Net needs an extra depth dimension
        img_trans = np.expand_dims(img_trans, axis=0)

        return img_trans

    def __getitem__(self, i):
        idx = self.ids[i]  # sample folder name
        # os.path.join works whether or not data_dir carries a trailing
        # separator (the old string concatenation required one)
        img_path = os.path.join(self.data_dir, idx, "images", idx + ".png")
        mask_path = os.path.join(self.data_dir, idx, "save_masks", idx + ".png")

        mask = Image.open(mask_path).convert('L')
        img = Image.open(img_path).convert('L')

        assert img.size == mask.size, \
            f'Image and mask {idx} should be the same size, but are {img.size} and {mask.size}'

        img = self.preprocess(img, self.img_size)
        mask = self.preprocess(mask, self.img_size)

        return {
            'image': torch.from_numpy(img).type(torch.FloatTensor),
            'mask': torch.from_numpy(mask).type(torch.FloatTensor)
        }


class AbstractHDF5Dataset(Dataset):
    """
    Implementation of torch.utils.data.Dataset backed by HDF5 files, which iterates over the raw and label
    datasets patch by patch with a given stride (module-level ``patch_shape`` / ``stride_shape``).

    Subclasses must implement :meth:`create_h5_file` and :meth:`fetch_datasets`.
    """

    def __init__(self, file_path,
                 phase,
                 mirror_padding=None,
                 raw_internal_path='raw',
                 label_internal_path='label',
                 weight_internal_path=None):
        """
        :param file_path: path to H5 file containing raw data as well as labels and per pixel weights (optional)
        :param phase: 'train' for training, 'val' for validation, 'test' for testing; data augmentation is
            performed only during the 'train' phase
        :param mirror_padding (int or tuple): number of voxels padded to each axis (accepted but currently unused)
        :param raw_internal_path (str or list): H5 internal path to the raw dataset
        :param label_internal_path (str or list): H5 internal path to the label dataset
        :param weight_internal_path (str or list): H5 internal path to the per pixel weights (currently unused)
        """
        assert phase in ['train', 'val', 'test']

        self.phase = phase
        self.file_path = file_path

        # Open the H5 file; the internal paths act as keys into the file,
        # e.g. 'raw' maps to the raw volume, 'label' to the ground truth.
        input_file = self.create_h5_file(file_path)
        self.raws = input_file[raw_internal_path]
        print(f"data shape is : {self.raws.shape}")

        # global (min, max, mean, std), later consumed by the Transformer for normalization
        self.values = self.ds_stats()

        self.labels = None
        if phase != 'test':
            # labels are only needed when training/validating
            self.labels = self.fetch_and_check(input_file, label_internal_path)

        # build slice indices for raw and label data sets
        slice_builder = SliceBuilder(self.raws, self.labels, None, patch_shape=patch_shape, stride_shape=stride_shape)
        self.raw_slices = slice_builder.raw_slices
        self.label_slices = slice_builder.label_slices
        self.weight_slices = slice_builder.weight_slices

        self.patch_count = len(self.raw_slices)
        logging.info(f'Number of patches: {self.patch_count}')

    def ds_stats(self):
        """Calculate global min, max, mean and std of the raw volume (used for normalization)."""
        min_value, max_value, mean, std = calculate_stats(self.raws)
        print(f"Input stats: min={min_value}, max={max_value}, mean={mean}, std={std}")
        return min_value, max_value, mean, std

    @staticmethod
    def create_h5_file(file_path):
        """Open the H5 file at ``file_path``; must be provided by subclasses."""
        raise NotImplementedError

    @staticmethod
    def fetch_datasets(input_file_h5, internal_paths):
        """Extract the dataset(s) at ``internal_paths`` from an open H5 file; must be provided by subclasses."""
        raise NotImplementedError

    def fetch_and_check(self, input_file_h5, internal_paths):
        # currently a thin wrapper around fetch_datasets; kept as a hook
        # for future sanity checks on the fetched (label/mask) data
        return self.fetch_datasets(input_file_h5, internal_paths)

    def __getitem__(self, idx):
        # IndexError is the conventional out-of-range signal for the sequence
        # protocol (the previous StopIteration becomes a RuntimeError per
        # PEP 479 if it escapes a generator); iter() handles both the same way.
        if idx >= len(self):
            raise IndexError(f'index {idx} out of range for {self.patch_count} patches')

        # get the slice for a given index 'idx'
        raw_idx = self.raw_slices[idx]

        transformer = Transformer(self.values)
        if self.phase == 'train':
            # full augmentation pipeline on the raw data for training
            raw_transform = transformer.raw_transform()
        else:
            # 'val'/'test': do not over-process the image data
            raw_transform = transformer.raw_val_transform()

        # get the raw data patch for a given slice
        raw_patch_transformed = self._transform_patches(self.raws, raw_idx, raw_transform)

        if self.phase == 'test':
            # raw_idx is returned so the predictor can stitch the patches back together,
            # e.g. raw_idx: [(0, 32), (0, 64), (0, 64)] is the first patch.
            # discard the channel dimension in the slices: the predictor requires
            # only the spatial dimensions of the volume
            if len(raw_idx) == 4:
                raw_idx = raw_idx[1:]
            return raw_patch_transformed, raw_idx

        # 'train' and 'val' additionally return the matching label patch
        if self.phase == 'train':
            label_transform = transformer.label_transform()
        else:
            label_transform = transformer.label_val_transform()
        label_idx = self.label_slices[idx]
        label_patch_transformed = self._transform_patches(self.labels, label_idx, label_transform)

        # return the transformed raw and label patches
        return raw_patch_transformed, label_patch_transformed

    @staticmethod
    def _transform_patches(dataset, patch_idx, transformer):
        """Cut the patch at ``patch_idx`` out of ``dataset`` and run it through ``transformer``."""
        return transformer(dataset[patch_idx])

    def __len__(self):
        return self.patch_count

    @staticmethod
    def _check_dimensionality(raws, labels):
        """Sanity-check that every raw/label pair has matching spatial dimensions."""
        def _volume_shape(volume):
            # drop the channel dimension of 4D (CxDxHxW) volumes
            if volume.ndim == 3:
                return volume.shape
            return volume.shape[1:]

        for raw, label in zip(raws, labels):
            assert raw.ndim in [3, 4], 'Raw dataset must be 3D (DxHxW) or 4D (CxDxHxW)'
            assert label.ndim in [3, 4], 'Label dataset must be 3D (DxHxW) or 4D (CxDxHxW)'

            assert _volume_shape(raw) == _volume_shape(label), 'Raw and labels have to be of the same size'

    @classmethod
    def create_datasets(cls, file_dir, phase="train"):
        """
        Build one dataset instance per H5 file found under ``file_dir``.

        :param file_dir: directory (or list of directories) to scan for H5 files
        :param phase: 'train', 'val' or 'test'
        :return: list of datasets for 'train'/'val'; ``(datasets, labels)`` for 'test',
            where ``labels`` holds the eagerly-read ground truth of each file
        """
        file_paths = cls.traverse_h5_paths(file_dir)  # all H5 file paths below the given dir(s)

        datasets = []
        if phase == "train" or phase == "val":
            for file_path in file_paths:
                print(f"loading {phase} set from : {file_path}")
                datasets.append(cls(file_path=file_path,
                                    phase=phase,
                                    raw_internal_path="raw",
                                    label_internal_path="label",
                                    weight_internal_path=None))
            return datasets

        labels = []
        for file_path in file_paths:
            # read the ground truth eagerly so the caller can score predictions;
            # explicit read-only mode + context manager guarantee the handle is closed
            with h5py.File(file_path, 'r') as label_data:
                labels.append(label_data["label"][()])
            datasets.append(cls(file_path=file_path,
                                phase=phase,
                                raw_internal_path="raw",
                                label_internal_path="label",
                                weight_internal_path=None))
        return datasets, labels

    @classmethod
    def create_mini_datasets(cls, file_dir, phase="train"):
        """
        Like :meth:`create_datasets`, but keeps only one randomly chosen file
        (a single patient) — useful for quick overfitting/debug runs.
        """
        file_paths = cls.traverse_h5_paths(file_dir)
        # pick exactly one patient at random
        random_n = random.randint(0, len(file_paths) - 1)
        file_paths = [file_paths[random_n]]

        datasets = []
        for file_path in file_paths:
            datasets.append(cls(file_path=file_path,
                                phase=phase,
                                raw_internal_path="raw",
                                label_internal_path="label",
                                weight_internal_path=None))
        return datasets

    @staticmethod
    def traverse_h5_paths(file_paths):
        """
        Collect the paths of all matching H5 files below the given directory/directories.

        NOTE: only files whose name contains 'fl' are kept — tumor and edema are
        trained separately, and the 'fl' sequence files are used here.
        """
        if isinstance(file_paths, str):
            file_paths = [file_paths]
        results = []
        for file_path in file_paths:
            if os.path.isdir(file_path):
                # if file path is a directory take all H5 files in that directory
                for f in glob.glob(os.path.join(file_path, "*.h5")):
                    if "fl" in f:
                        results.append(f)
        return results


class StandardHDF5Dataset(AbstractHDF5Dataset):
    """
    HDF5 dataset that opens each H5 file directly from disk and loads its
    contents into memory. Fast, but may consume a lot of memory.
    """

    def __init__(self, file_path, phase, mirror_padding=None,
                 raw_internal_path='raw', label_internal_path='label', weight_internal_path=None):
        # forward everything unchanged to the abstract base class
        kwargs = dict(file_path=file_path,
                      phase=phase,
                      mirror_padding=mirror_padding,
                      raw_internal_path=raw_internal_path,
                      label_internal_path=label_internal_path,
                      weight_internal_path=weight_internal_path)
        super().__init__(**kwargs)

    @staticmethod
    def create_h5_file(file_path):
        # read-only file handle; datasets are read lazily through h5py
        return h5py.File(file_path, 'r')

    @staticmethod
    def fetch_datasets(input_file_h5, internal_path):
        # simply index the open file by its internal path
        return input_file_h5[internal_path]

def get_train_loaders(train_paths, test_paths):
    """
    Build the training and validation loaders (torch.utils.data.DataLoader).

    :param train_paths: directory (or list of directories) with the training H5 files
    :param test_paths: directory (or list of directories) with the validation H5 files
    :return: dict {
        'train': <train_loader>
        'val': <val_loader>
    }
    """
    training_sets = StandardHDF5Dataset.create_datasets(train_paths, phase='train')
    validation_sets = StandardHDF5Dataset.create_datasets(test_paths, phase='val')

    print(f'Number of workers for train/val dataloader: {num_workers}')
    print(f'Batch size for train/val loader: {batch_size}')

    # when training with volumetric data use batch_size of 1 due to GPU memory constraints
    loaders = {}
    loaders['train'] = DataLoader(ConcatDataset(training_sets), batch_size=batch_size,
                                  shuffle=True, num_workers=num_workers)
    loaders['val'] = DataLoader(ConcatDataset(validation_sets), batch_size=batch_size,
                                shuffle=True, num_workers=num_workers)
    return loaders

def get_test_loaders(test_path):
    """
    Lazily yield one test DataLoader (paired with its ground-truth labels) per H5 file.

    :param test_path: directory (or list of directories) with the test H5 files
    :return: generator of (DataLoader, label ndarray) pairs
    """
    print('Creating test set loaders...')

    datasets, ground_truths = StandardHDF5Dataset.create_datasets(test_path, phase='test')

    # create the loaders one by one so only a single file is active at a time
    for dataset, label in zip(datasets, ground_truths):
        loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers,
                            collate_fn=prediction_collate)
        yield loader, label


# def prediction_collate(batch):
#     # return batch 
#     # print("collate")
#     # print("batch is : " + str(batch))
#     error_msg = "batch must contain tensors or slice; found {}"
#     if isinstance(batch[0], torch.Tensor):
#         print("1111")
#         return torch.stack(batch, 0)
#     elif isinstance(batch[0], tuple) and isinstance(batch[0][0], slice):
#         print("222")
#         ## test的时候是走这个if！！batch[0]表示一个batch中的元素， batch[0]是一个元组，因为里面有两个东西，一个是数据，另一个是切片slice
#         return batch
#     elif isinstance(batch[0], tuple) and isinstance(batch[0][1], str):
#         print("333")
#         return batch[0]
#     elif isinstance(batch[0], collections.Sequence):
#         print(isinstance(batch[0], tuple))
#         print("batch is :" + str(batch))
#         print(type(batch[0]))
#         print("444")
#         transposed = zip(*batch)
#         # print(transposed)
        
#         return [prediction_collate(samples) for samples in transposed]
    # raise TypeError((error_msg.format(type(batch[0]))))

def prediction_collate(batch):
    """Collate (patch, slice-index) pairs: stack the patches, keep the indices as a list."""
    patches, slice_indices = zip(*batch)
    return torch.stack(list(patches), 0), list(slice_indices)


def calculate_stats(images):
    """
    Return (min, max, mean, std) over every element of a list of ndarrays.

    The arrays may have different shapes, so each one is flattened to 1-D
    float32 before they are combined into a single array.
    """
    flattened = [np.asarray(img).astype(np.float32).ravel() for img in images]
    flat = np.concatenate(flattened)
    return flat.min(), flat.max(), flat.mean(), flat.std()

if __name__ == "__main__":
    # no CLI entry point: this module is meant to be imported by the training/testing scripts
    pass