from skorch.core import *
from skorch.vision import *
from sklearn.model_selection import train_test_split

pkg_resources.require("albumentations>=0.2.2")

import albumentations as albu

def denormalize_image(img:TensorImage, mean, std, max_pixel_value=255.0):
    """Undo channel-wise normalization and convert to a uint8 numpy image.

    :param img: normalized image tensor, channels-first (..., C, H, W)
    :param mean: per-channel mean used during normalization
    :param std: per-channel std used during normalization
    :param max_pixel_value: scale applied after de-normalization (default 255.0)
    :return: numpy uint8 array with values saturated to [0, max_pixel_value]
    """
    mean, std = tensor(mean), tensor(std)
    img = img.cpu().float()*std[...,None,None] + mean[...,None,None]
    img *= max_pixel_value
    # BUG FIX: clamp before the uint8 cast — out-of-range values would
    # otherwise wrap around (e.g. 382.5 -> 126) instead of saturating.
    img = img.clamp(0, max_pixel_value)
    return img.numpy().astype(np.uint8)

# Channel-wise (mean, std) normalization statistics for the two datasets
# (grayscale values replicated across the 3 channels).
dr_stats = [[0.52952275, 0.52952275, 0.52952275], [0.27506839, 0.27506839, 0.27506839]]
jsrt_stats = [[0.50696618, 0.50696618, 0.50696618], [0.2916163, 0.2916163, 0.2916163]]

# Recognised image suffixes (matched case-insensitively via str.endswith).
# BUG FIX: 'webp' was missing its leading dot, so any filename merely
# ending in "webp" (e.g. 'notwebp') was accepted.
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp']

def create_segment_multi_augmenter(height=256, width=256):
    """Build an albumentations pipeline for one image plus three masks.

    albumentations supports multiple mask inputs via ``additional_targets``:
    the extra keys 'mask1'/'mask2'/'mask3' receive the same spatial
    transform parameters as the image.

    :param height: output image/mask height
    :param width: output image/mask width
    :return: an ``albu.Compose`` callable taking image=..., mask1=..., mask2=..., mask3=...
    """
    # Map the extra call keywords onto albumentations' built-in target types.
    additional_targets = {
        'image1': 'image',
        'mask1': 'mask',
        'mask2': 'mask',
        'mask3': 'mask',
    }
    # FIX: build a single Compose with additional_targets directly; the
    # original wrapped one Compose inside another, which is redundant and
    # not the documented usage of albu.Compose.
    return albu.Compose([
        albu.Resize(height=height, width=width, p=1.0),
        albu.HorizontalFlip(p=0.5),
        albu.OneOf([albu.RandomBrightnessContrast(brightness_limit=.1, p=0.5),
                    albu.RandomGamma(gamma_limit=(20, 40), p=0.5)]),
        albu.ShiftScaleRotate(rotate_limit=5, p=0.5)
    ], p=1.0, additional_targets=additional_targets)


def create_classify_augmenter(height=256, width=256):
    """Build the albumentations pipeline used for classification training.

    Upscales to 600x600, then either random-crops (crop height 500-550 px)
    or plainly resizes down to the requested size, followed by photometric
    and geometric jitter.

    :param height: output image height
    :param width: output image width
    :return: an ``albu.Compose`` callable taking image=...
    """
    transformations = albu.Compose([
        albu.Resize(height=600, width=600, p=1.0),
        albu.OneOf([
            albu.RandomSizedCrop([500, 550], height, width, p=1.0),
            albu.Resize(height=height, width=width, p=1.0),
        ], p=1.0),

        # FIX: the original listed HorizontalFlip(p=0.5) twice (copy-paste
        # duplicate). Two independent p=0.5 flips give a net flip
        # probability of 0.5 as well, so a single flip preserves the
        # augmentation distribution while removing the redundant transform.
        albu.HorizontalFlip(p=0.5),

        albu.OneOf([albu.Blur(blur_limit=3, p=0.3),
                    albu.CLAHE(clip_limit=4.0, tile_grid_size=(8, 8), p=0.3),]),
        albu.OneOf([albu.RandomBrightnessContrast(brightness_limit=.1, p=0.3),
                    albu.RandomGamma(gamma_limit=(20, 40), p=0.3)]),

        albu.ShiftScaleRotate(rotate_limit=5, p=0.5)
    ])
    return transformations


def load_json_flag(json_file, flag_text:Collection[str]):
    """Read the labelme-style 'flags' dict from *json_file* and return the
    indexes (into *flag_text*, shifted down by one so '__ignore__' at
    position 0 is excluded) of every flag that is set.

    Returns an empty list when the '__ignore__' flag itself is set.
    Unknown flag names are reported by printing the offending file path.
    """
    with open(str(json_file)) as fp:
        flags = json.load(fp)['flags']

    # Report (once) any flag name that is not in the expected vocabulary.
    if any(name not in flag_text for name in flags):
        print(json_file)

    # A set '__ignore__' flag means the whole annotation is discarded.
    if flags['__ignore__']:
        return []

    return [flag_text.index(name) - 1 for name, is_set in flags.items() if is_set]


def load_json_multi_label(json_file):
    """Load the multi-label annotation from a labelme JSON file.

    Returns a one-hot encoded vector over the four real classes; the
    leading '__ignore__' flag is not part of the encoding.
    """
    labels = ["__ignore__",
              "excellent",
              "FB_inside_body",
              "FB_inside_lung_field",
              "FB_outside_lung_field"]
    active = load_json_flag(json_file, labels)
    return one_hot(active, len(labels) - 1)


def has_file_allowed_extension(filename, extensions):
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (iterable of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    # str.endswith accepts a tuple of suffixes, replacing the manual any()
    # loop with a single C-level call.
    return filename.lower().endswith(tuple(extensions))


def make_dataset(dir, class_to_idx, extensions):
    """Walk *dir* and collect (file_path, class_index) pairs.

    Each key of *class_to_idx* names a subdirectory of *dir*; every file
    under it whose name matches one of *extensions* is paired with that
    class's index. Classes, directories and file names are visited in
    sorted order, so the result is deterministic.
    """
    samples = []
    root_dir = os.path.expanduser(dir)
    for class_name in sorted(class_to_idx):
        class_dir = os.path.join(root_dir, class_name)
        if not os.path.isdir(class_dir):
            continue
        label = class_to_idx[class_name]
        for walk_root, _, filenames in sorted(os.walk(class_dir)):
            samples.extend(
                (os.path.join(walk_root, name), label)
                for name in sorted(filenames)
                if has_file_allowed_extension(name, extensions)
            )
    return samples


class DRSegmentDataset(Dataset):
    """Chest-DR segmentation dataset yielding (image, stacked-masks) pairs.

    Each sample is a grayscale image converted to 3-channel RGB plus three
    masks (lung, clavicle, shoulder blade) read from sibling
    ``masks/<part>/`` directories and concatenated along dim 0.
    """

    def __init__(self, data_path:PathOrStr, augmentation_func=None, stats:Collection[Tensor]=None, target_size:Sizes=None):
        """
        :param data_path: dataset root containing ``images/*.png``, or a
            list of image paths (whose grandparent is taken as the root)
        :param augmentation_func: factory called as ``augmentation_func(*target_size)``;
            must return an albumentations-style callable
        :param stats: (mean, std) normalization statistics
        :param target_size: (height, width) forwarded to *augmentation_func*
        """
        super().__init__()
        if is_listy(data_path):
            # Pre-split list of image files: recover the dataset root from
            # the first item (images live in <root>/images/).
            self.data_path = data_path[0].parents[1]
            self.image_items = sorted(data_path)
        else:
            self.data_path = Path(data_path)
            self.image_items = sorted(self.data_path.joinpath('images').glob('*.png'))
        self.augmentation = None
        self.target_size = target_size
        if augmentation_func:
            self.augmentation = augmentation_func(*self.target_size)
        # FIX: always bind stats; the original only set the attribute when
        # truthy, making transform_tensor raise AttributeError otherwise.
        self.stats = stats

    def __len__(self):
        """Number of image files found."""
        return len(self.image_items)

    def transform_tensor(self, image:NPImage, mask:NPArrayList):
        """Convert a numpy image and mask(s) to tensors; a list of masks is
        concatenated into one tensor along dim 0."""
        image = image_to_tensor(image, self.stats)
        if is_listy(mask):
            mask = cat([mask_to_tensor(m) for m in mask], dim=0)
        else:
            mask = mask_to_tensor(mask)
        return image, mask

    def __getitem__(self, idx):
        image_item = self.image_items[idx]

        def _mask_path(part):
            # Masks share the image's file name and live in masks/<part>/.
            return self.data_path.joinpath('masks', part, image_item.with_suffix('.png').name)

        # 1. read origin image (grayscale -> 3-channel RGB)
        img = load_image_obj(str(image_item), 'opencv')
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

        lung_mask = load_image_obj(str(_mask_path('lung')), 'opencv')
        clavicle_mask = load_image_obj(str(_mask_path('clavicle')), 'opencv')
        # FIX: this local was misleadingly named 'heart_mask_item' although
        # it always read the 'shoulderblade' masks.
        shoulderblade_mask = load_image_obj(str(_mask_path('shoulderblade')), 'opencv')

        # 2. augment image and all three masks with identical spatial params
        if self.augmentation:
            augmented = self.augmentation(image=img,
                                          mask1=lung_mask,
                                          mask2=clavicle_mask,
                                          mask3=shoulderblade_mask)
            img = augmented['image']
            lung_mask = augmented['mask1']
            clavicle_mask = augmented['mask2']
            shoulderblade_mask = augmented['mask3']

        return self.transform_tensor(img, [lung_mask, clavicle_mask, shoulderblade_mask])


class DRClassifyDataset(Dataset):
    """Chest-DR multi-label classification dataset.

    Samples are DICOM images paired with labelme JSON annotations; the JSON
    flags are turned into a one-hot multi-label vector.
    """

    def __init__(self, data_path: PathOrStr, augmentation_func=None, stats: Collection[Tensor] = None, target_size: Sizes = None):
        """
        :param data_path: directory containing ``images/*.json``, or a list
            of JSON annotation paths (whose grandparent is taken as the root)
        :param augmentation_func: factory called as ``augmentation_func(*target_size)``
        :param stats: (mean, std) normalization statistics
        :param target_size: (height, width) forwarded to *augmentation_func*
        """
        super().__init__()
        if is_listy(data_path):
            self.data_path = data_path[0].parents[1]
            self.json_items = sorted(data_path)
        else:
            self.data_path = Path(data_path)
            self.json_items = sorted(self.data_path.joinpath('images').glob('*.json'))
        self.augmentation = None
        self.target_size = target_size
        if augmentation_func:
            self.augmentation = augmentation_func(*self.target_size)
        # FIX: always bind stats; the original only set the attribute when
        # truthy, making transform_tensor raise AttributeError otherwise.
        self.stats = stats

    def __len__(self):
        """Number of annotation files found."""
        return len(self.json_items)

    def transform_tensor(self, image:NPImage):
        """Normalize and convert a numpy image to a tensor."""
        return image_to_tensor(image, self.stats)

    def __getitem__(self, idx):
        json_item = self.json_items[idx]
        # The DICOM image shares the annotation's stem.
        dicom_item = json_item.with_suffix('.dcm')

        # 1. read origin image (via pydicom loader) and the label vector
        img = load_image_obj(str(dicom_item), 'pydcm')
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        label = load_json_multi_label(json_item)

        # 2. augment (removed: dead commented-out return and the unused
        # 'dcm_name' local that only served it)
        if self.augmentation:
            img = self.augmentation(image=img)['image']

        return self.transform_tensor(img), label

class DatasetTuple(Dataset):
    """A generic data loader where the samples are arranged in this way: ::

        root/class_x/xxx.ext
        root/class_x/xxy.ext
        root/class_x/xxz.ext

        root/class_y/123.ext
        root/class_y/nsdf3.ext
        root/class_y/asd932_.ext

    Args:
        samples (list): list of (sample path, class_index) tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        augmentation_func (callable, optional): factory called as
            ``augmentation_func(*target_size)`` returning an
            albumentations-style transform.
        stats: (mean, std) normalization statistics.
        target_size: (height, width) forwarded to *augmentation_func*.

     Attributes:
        classes (list): sorted class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        samples (list): List of (sample path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(self, samples:ListOrItem, class_to_idx, augmentation_func=None, stats:Collection[Tensor]=None, target_size:Sizes=None):
        super().__init__()
        self.extensions = IMG_EXTENSIONS

        # BUG FIX: ``list(...).sort()`` sorts in place and returns None, so
        # ``self.classes`` was always None. ``sorted`` returns the new list.
        self.classes = sorted(class_to_idx.keys())
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]

        self.augmentation = None
        self.target_size = target_size
        if augmentation_func:
            self.augmentation = augmentation_func(*self.target_size)
        # Always bind stats so transform_tensor cannot hit an AttributeError.
        self.stats = stats

    def transform_tensor(self, image:NPImage):
        """Normalize and convert a numpy image to a tensor."""
        return image_to_tensor(image, self.stats)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, label = self.samples[index]
        # 1. read origin image (grayscale -> 3-channel RGB)
        img = load_image_obj(str(path), 'opencv')
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

        # 2. augment
        if self.augmentation:
            img = self.augmentation(image=img)['image']

        return self.transform_tensor(img), label

    def __len__(self):
        """Number of samples."""
        return len(self.samples)


def create_floder_classify_datasets(data_path:Path, augmentation_func=None, stats:Collection[Tensor]=None, target_size:Sizes=None):
    """Build train/validation ``DatasetTuple`` datasets from a class-per-folder tree.

    ('floder' is a historical typo in the public name, kept for callers.)

    :param data_path: root directory with one subdirectory per class
    :param augmentation_func: factory called as ``augmentation_func(*target_size)``
    :param stats: (mean, std) normalization statistics
    :param target_size: (height, width) forwarded to the datasets
    :return: (train_ds, val_ds), split 85/15 at random
    :raises RuntimeError: when no image file is found under *data_path*
    """
    def _find_classes(dir):
        """Return a {class_name: class_index} mapping from the immediate
        subdirectories of *dir* (sorted, so indices are deterministic)."""
        # os.scandir is available on every supported Python (3.5+); the old
        # os.listdir fallback for <3.5 was dead code.
        classes = sorted(entry.name for entry in os.scandir(dir) if entry.is_dir())
        return {name: i for i, name in enumerate(classes)}

    class_to_idx = _find_classes(data_path)
    samples = make_dataset(data_path, class_to_idx, IMG_EXTENSIONS)

    if len(samples) == 0:
        # BUG FIX: str(data_path) — concatenating a Path to a str raised
        # TypeError instead of the intended RuntimeError.
        raise RuntimeError("Found 0 files in subfolders of: " + str(data_path) + "\n"
                           "Supported extensions are: " + ",".join(IMG_EXTENSIONS))

    x_train, x_test = train_test_split(samples, test_size=0.15, shuffle=True)

    train_ds = DatasetTuple(x_train, class_to_idx, augmentation_func=augmentation_func,
                            stats=stats, target_size=target_size)
    val_ds = DatasetTuple(x_test, class_to_idx, augmentation_func=augmentation_func,
                          stats=stats, target_size=target_size)
    return train_ds, val_ds



def create_dr_segment_datasets(data_path:Path, augmentation_func=None, stats:Collection[Tensor]=None, target_size:Sizes=None):
    """
    create dr segmentation datasets
    :param data_path: dataset root containing an ``images`` folder
    :param augmentation_func: augmentation function factory
    :param stats: mean and std
    :param target_size: (height, width) forwarded to the datasets
    :return: (train_ds, validation_ds), split 75/25 at random
    """
    data_path = Path(data_path)
    # BUG FIX: the glob pattern was '*.create_dr_classify_datasets' — a
    # stray paste of a function name that matched nothing. DRSegmentDataset
    # reads '*.png' images, so glob those.
    image_items = sorted(data_path.joinpath('images').glob('*.png'))
    x_train, x_test = train_test_split(image_items, test_size=0.25, shuffle=True)

    train_ds = DRSegmentDataset(x_train, augmentation_func=augmentation_func, stats=stats, target_size=target_size)
    valida_ds = DRSegmentDataset(x_test, augmentation_func=augmentation_func, stats=stats, target_size=target_size)
    return train_ds, valida_ds


def create_dr_classify_datasets(data_path: PathList, augmentation_func=None, stats: Collection[Tensor]=None, target_size:Sizes=None):
    """Collect labelme JSON annotations from one or more directories and
    split them into train/validation ``DRClassifyDataset`` objects (75/25).

    :param data_path: a directory, or a list of directories, holding *.json
    :param augmentation_func: factory called as ``augmentation_func(*target_size)``
    :param stats: (mean, std) normalization statistics
    :param target_size: (height, width) forwarded to the datasets
    :return: (train_ds, validation_ds)
    """
    roots = data_path if is_listy(data_path) else [data_path]
    json_items = []
    for root in roots:
        json_items.extend(sorted(Path(root).glob('*.json')))
    train_items, valid_items = train_test_split(json_items, test_size=0.25, shuffle=True)

    train_ds = DRClassifyDataset(train_items, augmentation_func=augmentation_func, stats=stats, target_size=target_size)
    valida_ds = DRClassifyDataset(valid_items, augmentation_func=augmentation_func, stats=stats, target_size=target_size)
    return train_ds, valida_ds


def test_segment_datasets():
    """Visual smoke test: iterate the segmentation dataset, de-normalize a
    batch and plot each image next to its three masks. Type 'ex' at the
    prompt to stop; anything else shows the next batch."""
    test_dataset = DRSegmentDataset('/home/blake/data/dataset/datasets/DR/dicom/DR', create_segment_multi_augmenter, jsrt_stats, target_size=(512,512))
    bs=2
    trainloader = DataLoader(test_dataset, batch_size=bs, num_workers=0)
    for i, data_samples in enumerate(trainloader):
        imgs, masks = data_samples
        imgs = to_detach(imgs, cpu=True)
        show_imgs = denormalize_image(imgs, jsrt_stats[0], jsrt_stats[1])

        # NCHW -> NHWC for matplotlib
        show_imgs = np.transpose(show_imgs,[0, 2, 3, 1])
        print(show_imgs.shape)
        masks = to_detach(masks, cpu=True)
        masks = to_np(masks)
        print(show_imgs[0].shape)
        # BUG FIX: all three prints inspected masks[0][1]; report the pixel
        # histogram of each of the three mask channels instead.
        for ch in range(3):
            print(Counter(np.squeeze(masks[0][ch]).flatten()))

        f, axarr = plt.subplots(bs, 4)
        for j in range(bs):
            axarr[j][0].imshow(show_imgs[j])
            for ch in range(3):
                axarr[j][ch + 1].imshow(np.squeeze(masks[j][ch]))
        plt.show()
        if input() == "ex":
            break
        plt.close()

def test_classify_datasets():
    """Visual smoke test: iterate the classification dataset, de-normalize a
    batch and plot each image titled with its label vector. Type 'ex' at
    the prompt to stop; anything else shows the next batch."""
    dataset = DRClassifyDataset('/home/blake/data/dataset/datasets/DR/dicom/DR', create_classify_augmenter, jsrt_stats, target_size=(512,512))
    bs = 2
    loader = DataLoader(dataset, batch_size=bs, num_workers=0)
    for batch in loader:
        imgs, label = batch
        imgs = to_detach(imgs, cpu=True)
        show_imgs = denormalize_image(imgs, jsrt_stats[0], jsrt_stats[1])

        # NCHW -> NHWC for matplotlib
        show_imgs = np.transpose(show_imgs, [0, 2, 3, 1])
        label = label.cpu().numpy()
        print(show_imgs.shape)
        print(label.shape)

        fig, axes = plt.subplots(bs, 1)
        for j in range(bs):
            title = np.array2string(label[j], precision=2, separator=',', suppress_small=True)
            axes[j].set_title(title, fontsize=8)
            axes[j].imshow(show_imgs[j])

        plt.show()
        if input() == "ex":
            break
        plt.close()


if __name__ == '__main__':
    # Manual-inspection entry point: prints a blank line, then runs the
    # classification smoke test (the segmentation test is currently
    # disabled). Requires the hard-coded dataset paths above to exist.
    print('')
    #test_segment_datasets()
    test_classify_datasets()