import numpy as np
import logging, os
import torch
import math
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from StreamLearn.Dataset.StreamDataset import StreamDataset

# Public API of this module.
__all__ = ['CIFAR10C', 'CIFAR100C', "get_cifar_loader", "CIFAR10CB", "CIFAR100CB"]

def sparse2coarse(targets):
    """Map CIFAR-100 fine labels (0-99) onto their coarse labels (0-19).

    Usage:
        trainset = torchvision.datasets.CIFAR100(path)
        trainset.targets = sparse2coarse(trainset.targets)
    """
    # Lookup table: entry i is the coarse (super)class of fine class i.
    fine_to_coarse = np.array([
        4, 1, 14, 8, 0, 6, 7, 7, 18, 3,
        3, 14, 9, 18, 7, 11, 3, 9, 7, 11,
        6, 11, 5, 10, 7, 6, 13, 15, 3, 15,
        0, 11, 1, 10, 12, 14, 16, 9, 11, 5,
        5, 19, 8, 8, 15, 13, 14, 17, 18, 10,
        16, 4, 17, 4, 2, 0, 17, 4, 18, 17,
        10, 3, 2, 12, 12, 16, 12, 1, 9, 19,
        2, 10, 0, 1, 16, 12, 9, 13, 15, 13,
        16, 19, 2, 4, 6, 19, 5, 5, 8, 19,
        18, 1, 2, 15, 6, 0, 17, 8, 14, 13,
    ])
    return fine_to_coarse[targets]

class CIFAR10C(StreamDataset):
    """Batch stream over the corrupted CIFAR-10 test set (CIFAR-10-C).

    For every (corruption, severity) pair the corrupted test images are
    loaded from ``<root>/CIFAR-10-C/<corruption>.npy``, shuffled with a
    seeded RNG, and yielded in mini-batches of ``batch_size`` samples.
    """

    NAME = "CIFAR-10-C" 
    CORRUPTIONS = ("shot_noise", "motion_blur", "snow", "pixelate", 
        "gaussian_noise", "defocus_blur", "brightness", "fog",
        "zoom_blur", "frost", "glass_blur", "impulse_noise", "contrast",
        "jpeg_compression", "elastic_transform")

    def __init__(self, 
        root="/data", 
        batch_size=64, 
        seed=998244353,
        corruptions=CORRUPTIONS,
        severities=1,
    ):
        """Configure the stream.

        Args:
            root: Directory containing the ``CIFAR-10-C`` folder (and the
                clean CIFAR-10 data used when severity is 0).
            batch_size: Number of samples per yielded batch.
            seed: Seed of the shuffling RNG, for reproducible streams.
            corruptions: Corruption names to iterate over.
            severities: A single severity or a list of severities in
                [0, 5]; 0 selects the clean test set.
        """
        super().__init__()
        self.root, self.seed = root, seed
        self.batch_size = batch_size
        self.rand = np.random.RandomState(seed=seed)
        self._epoch, self._epochs = 0, 0
        self._data, self._label = None, None
        # Accept a bare scalar severity by wrapping it in a list.
        self.severities = severities if isinstance(severities, list) else [severities]
        self.corruptions = corruptions

    def run(self):
        """Yield one batch generator per (corruption, severity) pair."""
        for i_c in range(len(self.corruptions)):
            for i_s in range(len(self.severities)):
                yield self._generate(i_c, i_s)

    def _generate(self, i_c, i_s):
        """Yield (data, label, tag, class_ratio) batches for one setting."""
        corruption = self.corruptions[i_c]
        severity = self.severities[i_s]
        # Read & shuffle data
        data, label = self.load_corruptions_cifar(severity=severity, corruption=corruption)
        rand_index = self.rand.permutation(data.shape[0])
        data, label = data[rand_index, ...], label[rand_index, ...]
        # Per-class frequency over the whole (shuffled) split.
        ratio = np.array([(torch.sum(label == i) / len(label)).item() for i in range(self.num_classes())])
        logging.info(" > Class Ratio: ")
        logging.info(ratio)
        # Yield data in batches; the last batch may be smaller.
        n_batches = math.ceil(data.shape[0] / self.batch_size)
        for counter in range(n_batches):
            data_curr = data[counter * self.batch_size:(counter + 1) * self.batch_size]
            label_curr = label[counter * self.batch_size:(counter + 1) * self.batch_size]
            tag_cur = f"{corruption}-{severity}-{counter}"
            yield data_curr, label_curr, tag_cur, ratio

    def num_classes(self):
        """Number of target classes."""
        return 10

    def download(self):
        """No-op: torchvision downloads the clean data lazily in load_cifar."""
        pass

    def load_cifar(self):
        """Load the clean CIFAR-10 test split (used when severity == 0)."""
        # `load_dataset` is inherited from StreamDataset (not shown here).
        dataset = datasets.CIFAR10(root=self.root, train=False, transform=transforms.Compose([transforms.ToTensor()]), download=True)
        return self.load_dataset(dataset)

    def load_corruptions_cifar(self, severity, corruption):
        """Load all images/labels of one corruption at one severity.

        Returns:
            Tuple ``(images, labels)``: images as a float32 tensor in NCHW
            layout scaled to [0, 1], labels as a 1-D tensor.

        Raises:
            FileNotFoundError: If the dataset root or the required
                ``labels.npy`` / ``<corruption>.npy`` file is missing.
        """
        assert 0 <= severity <= 5
        if severity > 0: assert corruption in self.CORRUPTIONS
        if severity == 0: return self.load_cifar()
        n_total_cifar = 10000
        if not os.path.exists(self.root): raise FileNotFoundError("The root of datasets is not found.")
        data_dir = os.path.join(self.root, self.NAME)
        # Load labels (shared by all corruptions).
        label_path = os.path.join(data_dir, "labels.npy")
        if not os.path.isfile(label_path): raise FileNotFoundError("Labels are missing.")
        labels = np.load(label_path)
        labels = labels[: n_total_cifar]
        # Load images; the .npy file stacks all 5 severities back to back.
        data_path = os.path.join(data_dir, f"{corruption}.npy")
        # BUG FIX: the message was missing the f-prefix and printed the
        # literal text "{corruption}".
        if not os.path.isfile(data_path): raise FileNotFoundError(f"Data {corruption} is missing.")
        images_all = np.load(data_path)
        images = images_all[(severity - 1) * n_total_cifar: severity * n_total_cifar]
        # NHWC uint8 -> NCHW float32 in [0, 1].
        images = np.transpose(images, (0, 3, 1, 2))
        images = images.astype(np.float32) / 255
        images = torch.tensor(images)
        labels = torch.tensor(labels)
        return images, labels

class CIFAR10CB(CIFAR10C):
    """CIFAR-10-C stream with an optional class-imbalance ("bind") step.

    For corruption index ``i_c`` that has an entry in ``bind_class``, the
    shuffled split is re-sampled without replacement so that the bound
    class is over-represented relative to the other classes.
    """

    NAME = "CIFAR-10-C" 
    CORRUPTIONS = ("shot_noise", "motion_blur", "snow", "pixelate", 
        "gaussian_noise", "defocus_blur", "brightness", "fog",
        "zoom_blur", "frost", "glass_blur", "impulse_noise", "contrast",
        "jpeg_compression", "elastic_transform")

    def __init__(self, 
        root="/data", 
        batch_size=64, 
        seed=998244353,
        corruptions=CORRUPTIONS,
        severities=1,
        bind_class=None,
        bind_ratio=None,
    ):
        """Configure the imbalanced stream.

        Args:
            bind_class: Per-corruption class index to over-sample; an
                empty list (the default) disables the imbalance step.
            bind_ratio: Imbalance factor of the bound class. NOTE(review):
                used as a scalar in _generate despite the list-like
                default — confirm against callers.
        """
        super().__init__(root=root, batch_size=batch_size, seed=seed, corruptions=corruptions, severities=severities)
        # BUG FIX: the defaults were mutable lists ([]), shared across all
        # instances; use None sentinels with fresh fallbacks instead.
        self.bind_class = [] if bind_class is None else bind_class
        self.bind_ratio = [] if bind_ratio is None else bind_ratio

    def _generate(self, i_c, i_s):
        """Yield (data, label, tag, class_ratio) batches, optionally
        over-sampling the class bound to this corruption."""
        corruption = self.corruptions[i_c]
        severity = self.severities[i_s]
        # Read
        data, label = self.load_corruptions_cifar(severity=severity, corruption=corruption)
        # Shuffle data
        rand_index = self.rand.permutation(data.shape[0])
        data, label = data[rand_index, ...], label[rand_index, ...]
        if i_c < len(self.bind_class):
            bound_class = self.bind_class[i_c]
            imbalance = self.bind_ratio  # used as a scalar — TODO confirm
            imbalance = imbalance / (imbalance + self.num_classes() - 1)
            n_samples = data.shape[0]
            # Bound-class samples get weight ~1, every other sample ~1/n.
            proba = torch.where(label == bound_class, 1.0, 1.0 / n_samples)
            proba = proba / proba.sum()
            n_sampling = int(data.shape[0] / self.num_classes() / imbalance)
            chosen = self.rand.choice(n_samples, n_sampling, p=proba.cpu().numpy(), replace=False)
            select = np.array([False] * n_samples)
            select[chosen] = True
            data = data[select, ...]
            label = label[select, ...]

        # Per-class frequency over the (possibly re-sampled) split.
        ratio = [torch.sum(label == i) / len(label) for i in range(self.num_classes())]
        logging.info(" > Class Ratio: ")
        logging.info(ratio)

        # Yield data in batches; the last batch may be smaller.
        n_batches = math.ceil(data.shape[0] / self.batch_size)
        for counter in range(n_batches):
            data_curr = data[counter * self.batch_size:(counter + 1) * self.batch_size]
            label_curr = label[counter * self.batch_size:(counter + 1) * self.batch_size]
            tag_cur = f"{corruption}-{severity}-{counter}"
            yield data_curr, label_curr, tag_cur, ratio

class CIFAR100C(CIFAR10C):
    """CIFAR-100-C variant of CIFAR10C (100 fine-grained classes)."""

    NAME = "CIFAR-100-C" 
    CORRUPTIONS = ("shot_noise", "motion_blur", "snow", "pixelate", 
        "gaussian_noise", "defocus_blur", "brightness", "fog",
        "zoom_blur", "frost", "glass_blur", "impulse_noise", "contrast",
        "jpeg_compression", "elastic_transform")

    def __init__(self, 
        root="/data", 
        batch_size=64, 
        seed=998244353,
        corruptions=CORRUPTIONS,
        severities=1,
    ):
        """Configure the stream; see CIFAR10C.__init__ for parameters."""
        # CONSISTENCY FIX: forward all arguments to the parent instead of
        # partially initializing and then re-assigning _epoch/_data/
        # severities/corruptions by hand (the final state is identical).
        super().__init__(root=root, batch_size=batch_size, seed=seed,
                         corruptions=corruptions, severities=severities)

    def num_classes(self):
        """Number of fine-grained CIFAR-100 classes."""
        return 100

    def load_cifar(self):
        """Load the clean CIFAR-100 test split (used when severity == 0)."""
        dataset = datasets.CIFAR100(root=self.root, train=False, transform=transforms.Compose([transforms.ToTensor()]), download=True)
        return self.load_dataset(dataset)

class CIFAR100CB(CIFAR100C):
    """CIFAR-100-C stream with coarse-class imbalance ("bind") sampling.

    For corruption index ``i_c`` that has an entry in ``bind_class``, the
    shuffled split is re-sampled without replacement so that the bound
    coarse (super)classes are over-represented.
    """

    NAME = "CIFAR-100-C" 
    CORRUPTIONS = ("shot_noise", "motion_blur", "snow", "pixelate", 
        "gaussian_noise", "defocus_blur", "brightness", "fog",
        "zoom_blur", "frost", "glass_blur", "impulse_noise", "contrast",
        "jpeg_compression", "elastic_transform")

    def __init__(self, 
        root="/data", 
        batch_size=64, 
        seed=998244353,
        corruptions=CORRUPTIONS,
        severities=1,
        bind_class=None,
        bind_ratio=None,
    ):
        """Configure the imbalanced stream.

        Args:
            bind_class: Per-corruption string of comma-separated coarse
                class indices to over-sample; an empty list (the default)
                disables the imbalance step.
            bind_ratio: Imbalance factor of the bound classes. NOTE(review):
                used as a scalar in _generate despite the list-like
                default — confirm against callers.
        """
        super().__init__(root=root, batch_size=batch_size, seed=seed, corruptions=corruptions, severities=severities)
        # BUG FIX: the defaults were mutable lists ([]), shared across all
        # instances; use None sentinels with fresh fallbacks instead.
        self.bind_class = [] if bind_class is None else bind_class
        self.bind_ratio = [] if bind_ratio is None else bind_ratio

    def _generate(self, i_c, i_s):
        """Yield (data, label, tag, class_ratio) batches, optionally
        over-sampling the coarse classes bound to this corruption."""
        corruption = self.corruptions[i_c]
        severity = self.severities[i_s]
        # Read
        data, label = self.load_corruptions_cifar(severity=severity, corruption=corruption)
        num_cclasses = 20  # number of CIFAR-100 coarse (super)classes
        # Shuffle data
        rand_index = self.rand.permutation(data.shape[0])
        data, label = data[rand_index, ...], label[rand_index, ...]
        # BUG FIX: derive the coarse labels AFTER shuffling. They were
        # previously built from the unshuffled labels while data/label were
        # permuted, so the sampling probabilities below targeted the wrong
        # rows.
        clabel = torch.tensor(sparse2coarse(label))
        if i_c < len(self.bind_class):
            # bind_class entries are comma-separated coarse-class indices.
            bound_classes = list(map(int, self.bind_class[i_c].split(",")))
            m = len(bound_classes)
            imbalance = self.bind_ratio  # used as a scalar — TODO confirm
            n_samples = data.shape[0]

            # Samples of bound coarse classes get weight ~1, the rest ~1/n.
            large_proba = 1.0
            small_proba = 1.0 / n_samples
            proba = torch.ones_like(clabel) * small_proba
            for idx in bound_classes:
                proba += torch.where(clabel == idx, large_proba, 0.0)
            proba = proba / proba.sum()

            n_sampling = int(n_samples / num_cclasses / imbalance * (m * imbalance + num_cclasses - m))
            chosen = self.rand.choice(n_samples, n_sampling, p=proba.cpu().numpy(), replace=False)
            select = np.array([False] * n_samples)
            select[chosen] = True
            data = data[select, ...]
            label = label[select, ...]

        # Per-class frequency over the (possibly re-sampled) split.
        ratio = [torch.sum(label == i) / len(label) for i in range(self.num_classes())]
        logging.info(" > Class Ratio: ")
        logging.info(ratio)

        # Yield data in batches; the last batch may be smaller.
        n_batches = math.ceil(data.shape[0] / self.batch_size)
        for counter in range(n_batches):
            data_curr = data[counter * self.batch_size:(counter + 1) * self.batch_size]
            label_curr = label[counter * self.batch_size:(counter + 1) * self.batch_size]
            tag_cur = f"{corruption}-{severity}-{counter}"
            yield data_curr, label_curr, tag_cur, ratio

def get_cifar_loader(dataset, root="/data", batch_size=256):
    """Build train/test DataLoaders for the clean CIFAR-10/100 datasets.

    Args:
        dataset: Torchvision dataset name, "CIFAR10" or "CIFAR100".
        root: Directory where the data is stored (downloaded if absent).
        batch_size: Batch size of the train loader (test loader uses 100).

    Returns:
        Tuple of (train loader, test loader, number of classes).
    """
    assert dataset in ["CIFAR10", "CIFAR100"]
    # Per-dataset normalization constants; normalization itself is
    # currently disabled (kept here for reference).
    NORM_VAL = {
        "CIFAR10": ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        "CIFAR100": ((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762))
    }
    augment_steps = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # transforms.Normalize(NORM_VAL[args.dataset]),
    ]
    plain_steps = [
        transforms.ToTensor(),
        # transforms.Normalize(NORM_VAL[args.dataset]),
    ]

    dataset_cls = getattr(datasets, dataset)
    train_set = dataset_cls(root=root, train=True, download=True,
                            transform=transforms.Compose(augment_steps))
    test_set = dataset_cls(root=root, train=False, download=True,
                           transform=transforms.Compose(plain_steps))
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=100, shuffle=False, num_workers=2)

    n_classes = {"CIFAR10": 10, "CIFAR100": 100}[dataset]

    return train_loader, test_loader, n_classes