import random
import math
import cv2
import numpy as np
import os
from PIL import Image
from munch import Munch
from itertools import chain
from pathlib import Path
import copy

# from paddlex.cv.transforms.cls_transforms import ClsTransform, RandomHorizontalFlip, Normalize
from core.dataset import Dataset
from paddle import fluid

from utils.logger import get_logger
logger = get_logger(__name__)


def listdir(dname):
    """Recursively collect image files under *dname*.

    Matches the extensions png/jpg/jpeg/JPG and returns a list of
    pathlib.Path objects, grouped by extension.
    """
    found = []
    for ext in ('png', 'jpg', 'jpeg', 'JPG'):
        found.extend(Path(dname).rglob('*.' + ext))
    return found


def _make_balanced_sampler(labels, indexs):
    """Draw ``len(labels)`` class-balanced positions into the shuffled index array.

    Each sample is weighted by the inverse frequency of its class
    (1 / class_count), so rarer classes are picked more often. The draws
    themselves are delegated to fluid's ``sampling_id`` over a single row
    of weights.

    Args:
        labels (np.ndarray): integer class label per sample.
        indexs (np.ndarray): a (shuffled) permutation of sample positions;
            weights are reordered to match it.

    Returns:
        np.ndarray: ``len(labels)`` sampled positions into ``indexs``.

    NOTE(review): ``weights`` is not normalized to a probability
    distribution before being passed to sampling_id, and the
    ``min=0., max=3.`` arguments look unrelated to weighted index
    sampling — confirm against the fluid.layers.sampling_id docs.
    """
    count = len(labels)
    class_counts = np.bincount(labels)
    # Inverse-frequency weight per class, broadcast to every sample.
    class_weights = 1. / class_counts
    weights = class_weights[labels]
    # Reorder the weights so row positions line up with the shuffled indexs.
    weights = weights[indexs]
    # Reshape to (1, N): sampling_id draws one column index per row.
    weights = np.tile(weights, (1, 1))
    # logger.debug(f'class_counts: {class_counts}')
    # logger.debug(f'class_weights: {class_weights}')
    # logger.debug(f'labels: {len(labels)}, {labels}')
    # logger.debug(f'weights: {len(weights[0])}, {weights}')
    weight_indexs = []
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(weights)
        for _ in range(count):
            out = fluid.layers.sampling_id(x, min=0., max=3.)
            # print('out:', out.numpy())
            weight_indexs.append(out.numpy()[0])
    weight_indexs = np.array(weight_indexs)
    # logger.debug(f'weight_indexs: {len(weight_indexs)}, {weight_indexs}')
    return weight_indexs


class ClsTransform:
    """Base class for classification transforms.

    Concrete transforms subclass this and implement
    ``__call__(im, label=None)`` returning ``(im,)`` or ``(im, label)``.
    """

    def __init__(self):
        pass


class Compose(ClsTransform):
    """Apply a sequence of preprocessing/augmentation operators to an image.

    Every operator receives an image shaped [H, W, C] (height, width,
    channels).

    Args:
        transforms (list): preprocessing/augmentation operators; each must
            be a ClsTransform or an imgaug.augmenters.Augmenter.

    Raises:
        TypeError: if ``transforms`` is not a list.
        ValueError: if ``transforms`` is empty.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms

        # Validate the operators: only PaddleX-style ClsTransforms or
        # imgaug augmenters are supported.
        for op in self.transforms:
            if not isinstance(op, ClsTransform):
                import imgaug.augmenters as iaa
                if not isinstance(op, iaa.Augmenter):
                    raise Exception(
                        "Elements in transforms should be defined in 'paddlex.cls.transforms' or class of imgaug.augmenters.Augmenter, see docs here: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/"
                    )

    def __call__(self, im, label=None):
        """Run every transform in order on one image.

        Args:
            im (str/np.ndarray): image path or image array ([H, W, C]).
            label (int, optional): class index of the image.

        Returns:
            tuple: the fields required by the network; the exact tuple
                layout is determined by the last transform in the pipeline.

        Raises:
            TypeError: if an image path cannot be read.
        """
        if isinstance(im, np.ndarray):
            if len(im.shape) != 3:
                raise Exception(
                    "im should be 3-dimension, but now is {}-dimensions".
                        format(len(im.shape)))
        else:
            try:
                # Decode via PIL so the array is already in RGB order.
                im = Image.open(im).convert('RGB')
                im = np.array(im)
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed.
            except Exception:
                raise TypeError('Can\'t read The image file {}!'.format(im))
        for op in self.transforms:
            if isinstance(op, ClsTransform):
                outputs = op(im, label)
                im = outputs[0]
                if len(outputs) == 2:
                    label = outputs[1]
            else:
                import imgaug.augmenters as iaa
                if isinstance(op, iaa.Augmenter):
                    # NOTE(review): execute_imgaug is not defined or imported
                    # in this module — confirm it is provided elsewhere.
                    im = execute_imgaug(op, im)
                outputs = (im,)
                if label is not None:
                    outputs = (im, label)
        return outputs

    def add_augmenters(self, augmenters):
        """Prepend extra augmenters to the pipeline.

        Args:
            augmenters (list): operators to prepend to ``self.transforms``.

        Raises:
            Exception: if ``augmenters`` is not a list.
        """
        if not isinstance(augmenters, list):
            raise Exception(
                "augmenters should be list type in func add_augmenters()")
        transform_names = [type(x).__name__ for x in self.transforms]
        for aug in augmenters:
            if type(aug).__name__ in transform_names:
                # BUG FIX: this module never imports `logging`; the original
                # `logging.error(...)` raised NameError. Use the module-level
                # `logger` instead.
                logger.error(
                    "{} is already in ComposedTransforms, need to remove it from add_augmenters().".
                        format(type(aug).__name__))
        self.transforms = augmenters + self.transforms


class Normalize(ClsTransform):
    """Standardize an image.

    1. Scale pixel values into the range [0.0, 1.0].
    2. Subtract the per-channel mean and divide by the per-channel std.

    Args:
        mean (sequence): per-channel mean of the dataset.
            Defaults to (0.485, 0.456, 0.406).
        std (sequence): per-channel std of the dataset.
            Defaults to (0.229, 0.224, 0.225).
    """

    # Tuple defaults instead of mutable lists (shared mutable defaults are
    # a classic Python pitfall); callers may still pass lists.
    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self.mean = list(mean)
        self.std = list(std)

    def __call__(self, im, label=None):
        """Normalize one image.

        Args:
            im (np.ndarray): image array ([H, W, C]).
            label (int, optional): class index of the image.

        Returns:
            tuple: (im,) when label is None, otherwise (im, label).
        """
        mean = np.array(self.mean)
        std = np.array(self.std)
        # `im / 255.` allocates a fresh float array, so the in-place ops
        # below never touch the caller's buffer.
        im = im / 255.
        im -= mean
        im /= std
        if label is None:
            return (im, )
        else:
            return (im, label)


class RandomHorizontalFlip(ClsTransform):
    """Randomly flip an image left-right (train-time augmentation).

    Args:
        prob (float): probability of flipping. Defaults to 0.5.
    """

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, im, label=None):
        """Flip `im` horizontally with probability `self.prob`.

        Args:
            im (np.ndarray): image array.
            label (int, optional): class index of the image.

        Returns:
            tuple: (im,) when label is None, otherwise (im, label).
        """
        flipped = self.horizontal_flip(im) if random.random() < self.prob else im
        return (flipped,) if label is None else (flipped, label)

    def horizontal_flip(self, im):
        """Reverse the width axis for 2-D (H, W) or 3-D (H, W, C) arrays."""
        ndim = len(im.shape)
        if ndim == 3:
            return im[:, ::-1, :]
        if ndim == 2:
            return im[:, ::-1]
        # Other ranks pass through unchanged, matching the original code.
        return im


class RandomCropProb(ClsTransform):
    """Randomly crop an image (train-time augmentation).

    1. Compute the crop height/width from lower_scale, lower_ratio and
       upper_ratio.
    2. Pick a random crop origin.
    3. Crop the image.
    4. Resize the crop to crop_size x crop_size.

    Args:
        prob (float): probability of cropping; below it the crop is applied,
            otherwise the image is only resized to crop_size. Defaults to 0.5.
        crop_size (int): target side length after cropping. Defaults to 224.
        lower_scale (float): minimum crop area relative to the original
            area. Defaults to 0.08.
        lower_ratio (float): minimum aspect-ratio factor. Defaults to 3/4.
        upper_ratio (float): maximum aspect-ratio factor. Defaults to 4/3.
    """

    def __init__(self,
                 prob=0.5,
                 crop_size=224,
                 lower_scale=0.08,
                 lower_ratio=3. / 4,
                 upper_ratio=4. / 3):
        self.crop_size = crop_size
        self.lower_scale = lower_scale
        self.lower_ratio = lower_ratio
        self.upper_ratio = upper_ratio
        self.prob = prob

    def __call__(self, im, label=None):
        """Crop (with probability `prob`) or plainly resize one image.

        Args:
            im (np.ndarray): image array ([H, W, C]).
            label (int, optional): class index of the image.

        Returns:
            tuple: (im,) when label is None, otherwise (im, label).
        """
        if random.random() < self.prob:
            im = self.random_crop(im, self.crop_size, self.lower_scale,
                                  self.lower_ratio, self.upper_ratio)
        else:
            # BUG FIX: cv2.resize's third positional argument is `dst`, not
            # the interpolation flag; the flag must be passed by keyword.
            im = cv2.resize(im, (self.crop_size, self.crop_size),
                            interpolation=cv2.INTER_LINEAR)
        if label is None:
            return (im,)
        else:
            return (im, label)

    def random_crop(self, im,
                    crop_size=224,
                    lower_scale=0.08,
                    lower_ratio=3. / 4,
                    upper_ratio=4. / 3):
        """Sample a scale/aspect-ratio crop and resize it to crop_size."""
        scale = [lower_scale, 1.0]
        ratio = [lower_ratio, upper_ratio]
        aspect_ratio = math.sqrt(np.random.uniform(*ratio))
        w = 1. * aspect_ratio
        h = 1. / aspect_ratio
        # Cap the scale so the sampled crop cannot exceed the image bounds.
        bound = min((float(im.shape[0]) / im.shape[1]) / (h ** 2),
                    (float(im.shape[1]) / im.shape[0]) / (w ** 2))
        scale_max = min(scale[1], bound)
        scale_min = min(scale[0], bound)
        target_area = im.shape[0] * im.shape[1] * np.random.uniform(
            scale_min, scale_max)
        target_size = math.sqrt(target_area)
        w = int(target_size * w)
        h = int(target_size * h)
        i = np.random.randint(0, im.shape[0] - h + 1)
        j = np.random.randint(0, im.shape[1] - w + 1)
        im = im[i:i + h, j:j + w, :]
        im = cv2.resize(im, (crop_size, crop_size))
        return im


class Resize(ClsTransform):
    """Resize an image to a square of side `size`."""

    def __init__(self, size):
        self.size = size

    def __call__(self, im, label=None):
        """Return (im,) or (im, label) with `im` resized to size x size."""
        resized = cv2.resize(im, (self.size, self.size))
        return (resized,) if label is None else (resized, label)


class ReferenceDataset(Dataset):
    """Dataset yielding (image path, same-class reference path, label) triples."""

    def __init__(self, root, transform=None,
                 num_workers='auto',
                 buffer_size=8,
                 parallel_method='thread',
                 balance_datasets=False,
                 shuffle=False):
        super(ReferenceDataset, self).__init__(
            transforms=transform,
            num_workers=num_workers,
            buffer_size=buffer_size,
            parallel_method=parallel_method,
            shuffle=shuffle)
        self.samples, self.targets = self._make_dataset(root)
        self.transform = transform
        self._epoch = 0
        self.num_samples = len(self.samples)
        self.indexs = np.arange(self.num_samples)
        self.balance_datasets = balance_datasets

    def _make_dataset(self, root):
        """Pair every file with a random file drawn from the same class."""
        fnames, fnames2, labels = [], [], []
        for idx, domain in enumerate(sorted(os.listdir(root))):
            cls_fnames = listdir(os.path.join(root, domain))
            fnames.extend(cls_fnames)
            # A random permutation of the same class supplies the references.
            fnames2.extend(random.sample(cls_fnames, len(cls_fnames)))
            labels.extend([idx] * len(cls_fnames))
        return np.array(list(zip(fnames, fnames2))), np.array(labels)

    def iterator(self):
        """Yield (fname, fname2, label); optionally class-balanced."""
        if self.balance_datasets:
            random.shuffle(self.indexs)
            cur_indexs = _make_balanced_sampler(self.targets, self.indexs)
        else:
            if self.shuffle:
                random.shuffle(self.indexs)
            cur_indexs = np.arange(self.num_samples)

        for pos in range(self.num_samples):
            sample_idx = self.indexs[cur_indexs[pos]]
            fname, fname2 = self.samples[sample_idx]
            yield fname, fname2, self.targets[sample_idx]


class ImageFolder(Dataset):
    """Dataset over a class-per-subdirectory tree, yielding (fname, label)."""

    def __init__(self, root, transform=None,
                 num_workers='auto',
                 buffer_size=8,
                 parallel_method='thread',
                 balance_datasets=False,
                 shuffle=False):
        super(ImageFolder, self).__init__(
            transforms=transform,
            num_workers=num_workers,
            buffer_size=buffer_size,
            parallel_method=parallel_method,
            shuffle=shuffle)
        self.samples, self.targets = self._make_dataset(root)

        self.num_samples = len(self.samples)
        self.transform = transform
        self._pos = 0
        self.debug_fnames = {}
        self.indexs = np.arange(self.num_samples)
        self.balance_datasets = balance_datasets

    def _find_classes(self, dir):
        """
        Finds the class folders in a dataset.

        Args:
            dir (string): Root directory path.

        Returns:
            tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.

        Ensures:
            No class is a subdirectory of another.
        """
        classes = [d.name for d in os.scandir(dir) if d.is_dir()]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx

    def _make_dataset(self, directory):
        """Walk every class directory and collect parallel (path, label) arrays.

        NOTE(review): unlike the module-level `listdir`, this does not filter
        by image extension — every regular file under a class directory is
        included; confirm the data tree contains only images.
        """
        classes, class_to_idx = self._find_classes(directory)
        all_fnames, labels = [], []
        for target_class in sorted(class_to_idx.keys()):
            class_index = class_to_idx[target_class]
            target_dir = os.path.join(directory, target_class)
            if not os.path.isdir(target_dir):
                continue
            for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
                for fname in sorted(fnames):
                    # (removed a dead `item = path, class_index` local that
                    # was never used)
                    all_fnames.append(os.path.join(root, fname))
                    labels.append(class_index)

        return np.array(all_fnames), np.array(labels)

    def iterator(self):
        """Yield (fname, label); optionally class-balanced via fluid sampling."""
        if self.balance_datasets:
            random.shuffle(self.indexs)
            cur_indexs = _make_balanced_sampler(self.targets, self.indexs)
        else:
            if self.shuffle:
                random.shuffle(self.indexs)
            cur_indexs = np.arange(self.num_samples)

        for index in range(self.num_samples):
            fname = self.samples[self.indexs[cur_indexs[index]]]
            label = self.targets[self.indexs[cur_indexs[index]]]
            yield fname, label


def get_train_loader(root, which='source', img_size=256,
                     batch_size=8, prob=0.5, num_workers=4):
    """Build the train-phase batch generator.

    Args:
        root (str): dataset root with one subdirectory per class.
        which (str): 'source' for (image, label) pairs, 'reference' for
            (image, reference image, label) triples.
        img_size (int): target square image size.
        batch_size (int): samples per batch.
        prob (float): probability of applying the random crop.
        num_workers (int): worker count forwarded to the dataset.

    Returns:
        callable: generator factory yielding batches.

    Raises:
        NotImplementedError: for an unknown `which`.
    """
    logger.info('Preparing DataLoader to fetch %s images '
                'during the training phase...' % which)

    transform = Compose([
        RandomCropProb(prob=prob, crop_size=img_size, lower_scale=0.8, lower_ratio=0.9, upper_ratio=1.1),
        Resize(img_size),
        RandomHorizontalFlip(),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    # BUG FIX: `num_workers` was accepted but never forwarded to the
    # datasets, so the argument was silently ignored.
    if which == 'source':
        train_dataset = ImageFolder(root, transform, num_workers=num_workers,
                                    buffer_size=batch_size * 10,
                                    shuffle=True, balance_datasets=True)
    elif which == 'reference':
        train_dataset = ReferenceDataset(root, transform, num_workers=num_workers,
                                         buffer_size=batch_size * 10,
                                         shuffle=True, balance_datasets=True)
    else:
        raise NotImplementedError

    logger.info(f'{which} has {train_dataset.num_samples} samples.')
    return train_dataset.generator(batch_size=batch_size)


def debug_save(which, d):
    """Write up to 20 samples from dataset `d` to ./output as JPEGs for
    visual inspection.

    Args:
        which (str): 'source' for (image, label) batches; anything else is
            treated as a reference dataset with (image, image2, label).
        d: a dataset exposing ``generator(batch_size=...)``.
    """
    logger.debug(f'dataset: {d}')
    i = 0
    gen = iter(d.generator(batch_size=1)())
    for _ in range(20):
        try:
            datas = next(gen)
        except StopIteration:
            # BUG FIX: the original only recreated the generator here and
            # fell through with `datas` unset (NameError) or holding the
            # previous batch; fetch from the fresh generator before use.
            gen = iter(d.generator(batch_size=4)())
            datas = next(gen)

        if which == 'source':
            image, label = datas[0]
            logger.debug(f'image: {image.shape}, label: {label}')
            fname = os.path.join('output', f'{which}_{i}_{label}.jpg')
            cv2.imwrite(fname, image)
            logger.debug(f'i: {i}, datas: {len(datas)}, {fname}')
        else:
            image1, image2, label = datas[0]
            fname1 = os.path.join('output', f'{which}_{i}_{label}_0.jpg')
            cv2.imwrite(fname1, image1)
            fname2 = os.path.join('output', f'{which}_{i}_{label}_1.jpg')
            cv2.imwrite(fname2, image2)
            logger.debug(f'i: {i}, datas: {len(datas)}, {fname1}, {fname2}')
        i += 1


def get_eval_loader(root, img_size=256, batch_size=32,
                    imagenet_normalize=True, shuffle=True,
                    num_workers=4, drop_last=False):
    # Stub: evaluation loading is not implemented yet. All parameters are
    # currently ignored and None is returned; callers must handle that.
    logger.info('Preparing DataLoader for the evaluation phase...')

    return None


def get_test_loader(root, img_size=256, batch_size=32,
                    shuffle=True, num_workers=4):
    """Build the test/generation-phase batch generator.

    With prob=0 the crop transform never crops and only resizes to
    img_size. A batch_size of -1 means "one batch holding the whole
    dataset".
    """
    logger.info('Preparing DataLoader for the generation phase...')

    transform = Compose([
        RandomCropProb(prob=0, crop_size=img_size, lower_scale=0.8, lower_ratio=0.9, upper_ratio=1.1),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    test_dataset = ImageFolder(root, transform, buffer_size=batch_size,
                               shuffle=shuffle, num_workers=num_workers)
    logger.info(f'test has {test_dataset.num_samples} samples.')

    effective_batch = test_dataset.num_samples if batch_size == -1 else batch_size
    return test_dataset.generator(batch_size=effective_batch)


class InputFetcher:
    """Assembles model inputs (as Munch objects) from batch generators.

    `loader` and `loader_ref` are generator factories (callables returning
    a fresh batch iterator), as produced by ``Dataset.generator(...)``.
    Exhausted iterators are transparently recreated, so ``next(fetcher)``
    never raises StopIteration.
    """

    def __init__(self, loader, loader_ref=None, latent_dim=16, mode=''):
        self.loader = loader            # source-batch generator factory
        self.loader_ref = loader_ref    # reference-batch generator factory
        self.latent_dim = latent_dim    # dimensionality of sampled latent codes
        self.mode = mode                # 'train', 'val' or 'test'

    def fetch_next(self, iters, ref=False):
        """Fetch one batch from `iters`, restarting the loader on exhaustion.

        Returns stacked NCHW arrays: (x, y) for 2-field samples or
        (x, x2, y) for 3-field reference samples.
        NOTE(review): a batch whose items have neither 2 nor 3 fields
        implicitly returns None — confirm that cannot occur upstream.
        """
        try:
            datas = next(iters)
        except StopIteration:
            # Recreate the appropriate iterator, then retry recursively.
            if not ref:
                self.iter_src = iter(self.loader())
                iters = self.iter_src
            else:
                self.iter_ref = iter(self.loader_ref())
                iters = self.iter_ref

            return self.fetch_next(iters, ref=ref)
        datas_x = []
        datas_y = []
        if len(datas[0]) == 2:
            for x, y in datas:
                datas_x.append(x)
                datas_y.append(y)
            # transpose: HWC images -> NCHW batch layout.
            return np.array(datas_x).transpose(0, 3, 1, 2), np.array(datas_y)
        elif len(datas[0]) == 3:
            datas_x2 = []
            for x, x2, y in datas:
                datas_x.append(x)
                datas_x2.append(x2)
                datas_y.append(y)
            return np.array(datas_x).transpose(0, 3, 1, 2), np.array(datas_x2).transpose(0, 3, 1, 2), np.array(datas_y)

    def _fetch_inputs(self):
        """Return the next (x, y) source batch, creating the iterator lazily."""
        try:
            x, y = self.fetch_next(self.iter_src, ref=False)
        except (AttributeError, StopIteration):
            # AttributeError: iter_src does not exist yet on the first call.
            self.iter_src = iter(self.loader())
            x, y = self.fetch_next(self.iter_src, ref=False)
        return x, y

    def _fetch_refs(self):
        """Return the next (x, x2, y) reference batch, creating the iterator lazily."""
        try:
            x, x2, y = self.fetch_next(self.iter_ref, ref=True)
        except (AttributeError, StopIteration):
            # AttributeError: iter_ref does not exist yet on the first call.
            self.iter_ref = iter(self.loader_ref())
            x, x2, y = self.fetch_next(self.iter_ref, ref=True)
        return x, x2, y

    def __next__(self):
        """Build a Munch of inputs for the current mode.

        train: source batch + reference batch + two random latent codes.
        val:   two source batches (the second serves as the reference).
        test:  a single source batch.

        Raises:
            NotImplementedError: for an unknown mode.
        """
        x, y = self._fetch_inputs()
        if self.mode == 'train':
            x_ref, x_ref2, y_ref = self._fetch_refs()
            z_trg = np.random.randn(x.shape[0], self.latent_dim)
            z_trg2 = np.random.randn(x.shape[0], self.latent_dim)
            inputs = Munch(x_src=x, y_src=y, y_ref=y_ref,
                           x_ref=x_ref, x_ref2=x_ref2,
                           z_trg=z_trg, z_trg2=z_trg2)
        elif self.mode == 'val':
            x_ref, y_ref = self._fetch_inputs()
            inputs = Munch(x_src=x, y_src=y,
                           x_ref=x_ref, y_ref=y_ref)
        elif self.mode == 'test':
            inputs = Munch(x=x, y=y)
        else:
            raise NotImplementedError

        return inputs