import torch
import torch.utils.data
import numpy as np
import cv2
from torchvision import transforms
import pandas as pd
from skimage import io, transform
from src import const
import random
import skimage
import imgaug as ia
from imgaug import augmenters as iaa


def get_affine():
    """Build a light imgaug augmentation pipeline: horizontal flip,
    crop/pad, and a moderate random affine transform.

    Returns:
        iaa.Sequential: augmenter that applies its steps in random order.
    """
    # Define our sequence of augmentation steps that will be applied to every image
    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            # crop images by -5% to 10% of their height/width
            iaa.CropAndPad(
                percent=(-0.05, 0.1),
                pad_mode=ia.ALL,
                pad_cval=(0, 255)
            ),
            iaa.Affine(
                scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},  # scale images to 80-120% of their size, individually per axis
                translate_percent={"x": (-0.3, 0.3), "y": (-0.3, 0.3)},  # translate by -30 to +30 percent (per axis)
                rotate=(-60, 60),  # rotate by -60 to +60 degrees
                shear=(-16, 16),  # shear by -16 to +16 degrees
                order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
                mode=ia.ALL  # use any of scikit-image's warping modes
            ),
        ],
        random_order=True
    )
    return seq


def get_more_affine():
    """Build a heavier imgaug augmentation pipeline: both flips, crop/pad,
    a strong random affine transform, plus up to two extra effects
    (superpixels, blur, sharpen, emboss, contrast, grayscale).

    Returns:
        iaa.Sequential: augmenter that applies its steps in random order.
    """
    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
    def sometimes(aug): return iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.Flipud(0.2),  # vertically flip 20% of all images
            # crop images by -5% to 10% of their height/width
            iaa.CropAndPad(
                percent=(-0.05, 0.1),
                pad_mode=ia.ALL,
                pad_cval=(0, 255)
            ),
            iaa.Affine(
                scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},  # scale images to 50-150% of their size, individually per axis
                translate_percent={"x": (-0.3, 0.3), "y": (-0.3, 0.3)},  # translate by -30 to +30 percent (per axis)
                rotate=(-90, 90),  # rotate by -90 to +90 degrees
                shear=(-16, 16),  # shear by -16 to +16 degrees
                order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
                mode=ia.ALL  # use any of scikit-image's warping modes
            ),
            # additionally apply 0 to 2 of the following effects, in random order
            iaa.SomeOf((0, 2),
                       [
                sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),  # convert images into their superpixel representation
                iaa.OneOf([
                    iaa.GaussianBlur((0, 3.0)),  # blur images with a sigma between 0 and 3.0
                    iaa.AverageBlur(k=(2, 7)),  # blur image using local means with kernel sizes between 2 and 7
                    iaa.MedianBlur(k=(3, 11)),  # blur image using local medians with kernel sizes between 3 and 11
                ]),
                iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),  # sharpen images
                iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),  # emboss images
                iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),  # improve or worsen the contrast
                iaa.Grayscale(alpha=(0.0, 1.0)),
            ],
                random_order=True
            )
        ],
        random_order=True
    )
    return seq


class RandomFlip(object):
    """Mirror an image left-right with probability 0.5."""

    def __call__(self, image):
        """Return *image* flipped horizontally in ~50% of calls, else unchanged.

        Note: np.fliplr returns a view with reversed strides; callers that
        need a contiguous array must copy it afterwards.
        """
        # draw once per call; the original unused (h, w) unpacking was removed
        if np.random.rand() > 0.5:
            image = np.fliplr(image)

        return image


class CenterCrop(object):
    """Crop the central region of an image.

    Args:
        output_size (tuple or int): desired (height, width) of the crop;
            an int produces a square crop.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        height, width = image.shape[:2]
        crop_h, crop_w = self.output_size
        # place the crop window so it is centred in the image
        row0 = int((height - crop_h) / 2)
        col0 = int((width - crop_w) / 2)
        return image[row0:row0 + crop_h, col0:col0 + crop_w]


class RandomCrop(object):
    """Crop randomly the image in a sample.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        height, width = image.shape[:2]
        crop_h, crop_w = self.output_size
        # pick the top-left corner uniformly over all valid positions
        # (row drawn first, then column — order matters for RNG reproducibility)
        row0 = random.randint(0, height - crop_h)
        col0 = random.randint(0, width - crop_w)
        return image[row0:row0 + crop_h, col0:col0 + crop_w]


class RandomRescale(object):
    """Rescale an image so its shorter edge becomes a randomly chosen size.

    Args:
        output_size_range (tuple): (lower, upper) bounds for the target
            size of the shorter edge; aspect ratio is preserved.
    """

    def __init__(self, output_size_range):
        assert isinstance(output_size_range, tuple)
        self.lower_size = int(output_size_range[0])
        self.upper_size = int(output_size_range[1])

    def gen_output_size(self):
        # sample the target shorter-edge size uniformly (bounds inclusive)
        return random.randint(self.lower_size, self.upper_size)

    def __call__(self, image):
        height, width = image.shape[:2]
        short_side = self.gen_output_size()
        # scale the shorter edge to short_side, keeping the aspect ratio
        if height > width:
            target_h, target_w = short_side * height / width, short_side
        else:
            target_h, target_w = short_side, short_side * width / height
        return transform.resize(image, (int(target_h), int(target_w)), mode='constant')


class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, image):
        height, width = image.shape[:2]
        if not isinstance(self.output_size, int):
            # explicit (height, width) target
            target_h, target_w = self.output_size
        elif height > width:
            # int target: shorter edge (here width) becomes output_size
            target_h, target_w = self.output_size * height / width, self.output_size
        else:
            target_h, target_w = self.output_size, self.output_size * width / height
        return transform.resize(image, (int(target_h), int(target_w)), mode='constant')


class RandomGamma(object):
    """Apply gamma correction with a gamma drawn uniformly from a range."""

    def __init__(self, gamma_range):
        # (lower, upper) bounds for the sampled gamma value
        self.lower_gamma = gamma_range[0]
        self.upper_gamma = gamma_range[1]

    def __call__(self, image):
        # NOTE(review): relies on `import skimage` exposing the `exposure`
        # submodule; older scikit-image versions may require an explicit
        # `from skimage import exposure` — confirm against the pinned version.
        gamma = random.uniform(self.lower_gamma, self.upper_gamma)
        return skimage.exposure.adjust_gamma(image, gamma)


class FudanDrowsyDataset(torch.utils.data.Dataset):
    """Drowsiness-detection dataset backed by a DataFrame of image paths.

    Each row of *df* must provide 'CroppedPath' (image file) and 'Label'
    (mapped to an id via const.label_name2id).

    mode:
        RESCALE: resize directly to 128x128
        RANDOM:  random gamma -> random rescale (shorter edge 128~208)
                 -> random 128 crop -> random horizontal flip
        CENTER:  rescale to the middle of the random-rescale range
                 -> center 128 crop
        AFFINE:  random gamma -> resize to 128x128 -> imgaug affine pipeline
    """

    def __init__(self, df, mode):
        self.df = df
        self.mode = mode

        # shorter-edge bounds shared by the RANDOM and CENTER pipelines
        self.rescale_lower_size = 128
        self.rescale_upper_size = 208
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2

        self.rescale128 = Rescale((128, 128))
        self.random_gamma = RandomGamma((0.4, 1.6))
        self.random_rescale = RandomRescale((self.rescale_lower_size, self.rescale_upper_size))
        self.random_flip = RandomFlip()
        self.random_crop = RandomCrop((128, 128))
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((128, 128))
        self.affine_seq = get_affine()

        # ImageNet normalisation, applied after conversion to tensor
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

    def __len__(self):
        return len(self.df)

    def plot_sample(self, i):
        """Display the (augmented, un-normalised) image of sample *i*."""
        import matplotlib.pyplot as plt
        plt.figure(dpi=72)
        plt.imshow(self[i]['raw_image'])

    def __getitem__(self, i):
        row = self.df.iloc[i]
        image = io.imread(row['CroppedPath'])

        if self.mode == 'RESCALE':
            image = self.rescale128(image)
        elif self.mode == 'RANDOM':
            image = self.random_gamma(image)
            image = self.random_rescale(image)
            image = self.random_crop(image)
            image = self.random_flip(image)
        elif self.mode == 'CENTER':
            image = self.middle_of_random_rescale(image)
            image = self.center_crop(image)
        elif self.mode == 'AFFINE':
            image = self.random_gamma(image)
            image = self.rescale128(image)
            # imgaug expects uint8 input
            image = skimage.img_as_ubyte(image)
            image = self.affine_seq.augment_image(image)

        # copy: flips/crops may yield non-contiguous views or special numpy
        # types that downstream consumers cannot handle
        image = image.copy()
        raw_image = image

        # convert to tensor and normalize
        tensor = self.normalize(self.to_tensor(image)).float()

        return {
            'image': tensor,
            'raw_image': raw_image,
            'label': const.label_name2id[row['Label']],
        }
