import random
import math
import cv2
import numpy as np
import os
from munch import Munch
from PIL import Image
from itertools import chain
from pathlib import Path
import copy
import traceback

import paddle
from paddle import fluid
from paddle.io import Dataset, DataLoader, BatchSampler

from core.transforms import Compose, RandomCropProb, Resize, RandomHorizontalFlip, Normalize
from utils.logger import get_logger

logger = get_logger(__name__)


def listdir(dname):
    """Recursively collect image file paths under *dname*.

    Returns a list of Path objects for files ending in png/jpg/jpeg/JPG,
    grouped by extension (case-sensitive on case-sensitive filesystems).
    """
    found = []
    for ext in ('png', 'jpg', 'jpeg', 'JPG'):
        found.extend(Path(dname).rglob('*.' + ext))
    return found


class DefaultDataset(Dataset):
    """Flat, unlabeled image dataset: every image under *root*, in sorted
    path order. ``targets`` is always None (no labels)."""

    def __init__(self, root, transform=None):
        self.samples = sorted(listdir(root))
        self.transform = transform
        self.targets = None

    def __getitem__(self, index):
        image = Image.open(self.samples[index]).convert('RGB')
        if self.transform is None:
            return image
        return self.transform(image)

    def __len__(self):
        return len(self.samples)


class ReferenceDataset(Dataset):
    """Dataset yielding (image, paired same-domain image, domain label).

    Each sub-directory of *root* is a domain; every image in a domain is
    paired with a randomly chosen image from the same domain, and the
    label is the domain's index in sorted-name order.
    """

    def __init__(self, root, transform=None):
        self.samples, self.targets = self._make_dataset(root)
        self.transform = transform

    def _make_dataset(self, root):
        """Return ([(fname, paired_fname), ...], [label, ...]) for *root*."""
        domains = os.listdir(root)
        fnames, fnames2, labels = [], [], []
        for idx, domain in enumerate(sorted(domains)):
            class_dir = os.path.join(root, domain)
            cls_fnames = listdir(class_dir)
            fnames += cls_fnames
            # random.sample over the full list = a random permutation,
            # so each file appears exactly once as a pairing partner.
            fnames2 += random.sample(cls_fnames, len(cls_fnames))
            labels += [idx] * len(cls_fnames)
        return list(zip(fnames, fnames2)), labels

    def __getitem__(self, index):
        fname, fname2 = self.samples[index]
        fname = os.path.abspath(fname)
        fname2 = os.path.abspath(fname2)
        label = self.targets[index]
        try:
            img = Image.open(fname).convert('RGB')
            img2 = Image.open(fname2).convert('RGB')
            img = np.array(img)
            img2 = np.array(img2)
            if self.transform is not None:
                img = self.transform(img)
                img2 = self.transform(img2)
                # transform returns a sequence; the image is element 0
                img = np.array(img[0])
                img2 = np.array(img2[0])
            # HWC -> CHW for the network
            img = img.transpose((2, 0, 1))
            img2 = img2.transpose((2, 0, 1))
        except Exception:
            # Bug fix: previously the exception was swallowed and the
            # function fell through to `return img, img2, label`, raising a
            # confusing UnboundLocalError whenever the failure happened
            # before `img` was bound. Log the traceback, then re-raise so
            # the DataLoader surfaces the real error.
            traceback.print_exc()
            raise
        return img, img2, label

    def __len__(self):
        return len(self.targets)


class ImageFolder(Dataset):
    """Dataset of (image, domain label) pairs.

    Each sub-directory of *root* is a domain; the domain's index in
    sorted-name order is the integer label for every image it contains.
    """

    def __init__(self, root, transform=None):
        self.samples, self.targets = self._make_dataset(root)
        self.transform = transform
        logger.info(f'Count: {self.__len__()}, {root}')

    def _make_dataset(self, root):
        """Return ([fname, ...], [label, ...]) for all images under *root*."""
        domains = os.listdir(root)
        fnames, labels = [], []
        for idx, domain in enumerate(sorted(domains)):
            class_dir = os.path.join(root, domain)
            cls_fnames = listdir(class_dir)
            fnames += cls_fnames
            labels += [idx] * len(cls_fnames)
        return fnames, labels

    def __getitem__(self, index):
        fname = os.path.abspath(self.samples[index])
        label = self.targets[index]
        try:
            img = Image.open(fname).convert('RGB')
            img = np.array(img)
            if self.transform is not None:
                img = self.transform(img)
                # transform returns a sequence; the image is element 0
                img = np.array(img[0])
            img = img.transpose((2, 0, 1))  # HWC -> CHW
        except Exception:
            # Bug fix: previously the exception was swallowed and the
            # function fell through to `return img, label`, raising a
            # confusing UnboundLocalError whenever the failure happened
            # before `img` was bound. Log the traceback, then re-raise so
            # the DataLoader surfaces the real error.
            traceback.print_exc()
            raise
        return img, label

    def __len__(self):
        return len(self.targets)


def get_train_loader(root, places, which='source', img_size=256,
                     batch_size=8, prob=0.5, num_workers=4):
    """Build the shuffled training DataLoader.

    *which* selects the dataset flavor: 'source' -> ImageFolder,
    'reference' -> ReferenceDataset; anything else raises
    NotImplementedError.
    """
    logger.info('Preparing DataLoader to fetch %s images '
                'during the training phase...' % which)

    train_transform = Compose([
        RandomCropProb(prob=prob, crop_size=img_size, lower_scale=0.8, lower_ratio=0.9, upper_ratio=1.1),
        Resize(img_size),
        RandomHorizontalFlip(),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    dataset_by_kind = {'source': ImageFolder, 'reference': ReferenceDataset}
    if which not in dataset_by_kind:
        raise NotImplementedError
    dataset = dataset_by_kind[which](root, train_transform)

    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=True,
                      drop_last=False,
                      num_workers=num_workers,
                      places=places)


def get_eval_loader(root, img_size=256, batch_size=32,
                    imagenet_normalize=True, shuffle=True,
                    num_workers=4, drop_last=False):
    # NOTE(review): stub — logs a message and returns None; every
    # parameter is currently ignored. Callers must tolerate a None loader.
    # TODO: implement (presumably mirrors get_test_loader with
    # evaluation-time transforms — confirm against the training pipeline).
    logger.info('Preparing DataLoader for the evaluation phase...')

    return None


def get_test_loader(root, places, img_size=256, batch_size=32,
                    shuffle=True, num_workers=4):
    """Build the DataLoader used at generation time (resize + normalize only)."""
    logger.info('Preparing DataLoader for the generation phase...')

    test_transform = Compose([
        Resize(img_size),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    return DataLoader(ImageFolder(root, transform=test_transform),
                      batch_size=batch_size,
                      shuffle=shuffle,
                      drop_last=False,
                      num_workers=num_workers,
                      places=places)


class InputFetcher:
    """Endlessly pulls batches from (auto-restarted) loaders and packs
    them into a Munch according to *mode* ('train', 'val' or 'test').

    ``loader`` / ``loader_ref`` are callables returning a fresh iterable
    of batches; iterators are rebuilt whenever missing or exhausted.
    """

    def __init__(self, loader, loader_ref=None, latent_dim=16, mode=''):
        self.loader = loader
        self.loader_ref = loader_ref
        self.latent_dim = latent_dim
        self.mode = mode

    def _fetch_inputs(self):
        # AttributeError: first call (iter_src not yet created);
        # StopIteration: current epoch exhausted. Either way, restart.
        try:
            return next(self.iter_src)
        except (AttributeError, StopIteration):
            self.iter_src = iter(self.loader())
            return next(self.iter_src)

    def _fetch_refs(self):
        # Same restart logic for the reference loader.
        try:
            return next(self.iter_ref)
        except (AttributeError, StopIteration):
            self.iter_ref = iter(self.loader_ref())
            return next(self.iter_ref)

    def __next__(self):
        x, y = self._fetch_inputs()
        if self.mode == 'train':
            x_ref, x_ref2, y_ref = self._fetch_refs()
            batch = x.shape[0]
            inputs = Munch(x_src=x, y_src=y, y_ref=y_ref,
                           x_ref=x_ref, x_ref2=x_ref2,
                           z_trg=np.random.randn(batch, self.latent_dim),
                           z_trg2=np.random.randn(batch, self.latent_dim))
        elif self.mode == 'val':
            x_ref, y_ref = self._fetch_inputs()
            inputs = Munch(x_src=x, y_src=y,
                           x_ref=x_ref, y_ref=y_ref)
        elif self.mode == 'test':
            inputs = Munch(x=x, y=y)
        else:
            raise NotImplementedError

        return Munch(dict(inputs.items()))