import os
import pickle
import numpy as np
from PIL import Image
from torch.utils.data import Dataset

_train_list = ['train_data_batch_1',
               'train_data_batch_2',
               'train_data_batch_3',
               'train_data_batch_4',
               'train_data_batch_5',
               'train_data_batch_6',
               'train_data_batch_7',
               'train_data_batch_8',
               'train_data_batch_9',
               'train_data_batch_10']
_val_list = ['val_data']


def get_dataset(transform_train, transform_test):
    """Build the Imagenet32 train/test pair, each fully loaded in memory.

    Args:
        transform_train: transform applied to training images.
        transform_test: transform applied to test images.

    Returns:
        (train_dataset, test_dataset) tuple of Imagenet32 instances
        covering all 1000 classes.
    """
    train_set = Imagenet32(train=True, transform=transform_train)
    test_set = Imagenet32(train=False, transform=transform_test)
    return train_set, test_set


class Imagenet32(Dataset):
    """Downsampled ImageNet (32x32) loaded entirely into memory.

    Expects the pickled batch files (``train_data_batch_1..10`` or
    ``val_data``) under ``root``. Images are stored flat on disk and are
    reshaped to HWC uint8 arrays; labels are shifted from 1-based (disk
    format) to 0-based.

    ``__getitem__`` returns ``(image, label, index)`` — the index is
    included so callers can track per-sample state.
    """

    def __init__(self, root='~/data/imagenet32', train=True, transform=None):
        """Load the train batches (or the single val file) from `root`.

        Args:
            root: dataset directory; ``~`` is expanded.
            train: if True, load the 10 training batches, else the val file.
            transform: optional callable applied to the PIL image.
        """
        # expanduser is a no-op for paths without '~', so call it unconditionally
        # (also handles the '~user' form the old startswith-style check covered).
        root = os.path.expanduser(root)
        self.transform = transform
        size = 32

        if train:
            data, labels = [], []
            for name in _train_list:
                path = os.path.join(root, name)
                with open(path, 'rb') as fo:
                    # NOTE(security): pickle.load can execute arbitrary code;
                    # only load batch files obtained from a trusted source.
                    entry = pickle.load(fo, encoding='latin1')
                data.append(entry['data'])
                labels += entry['labels']
            data = np.concatenate(data)
        else:
            path = os.path.join(root, _val_list[0])
            with open(path, 'rb') as fo:
                entry = pickle.load(fo, encoding='latin1')
            data = entry['data']
            labels = entry['labels']

        # On-disk layout is flat (N, 3*32*32); reshape to CHW, then to HWC
        # so Image.fromarray can consume rows directly.
        data = data.reshape((-1, 3, size, size))
        self.data = data.transpose((0, 2, 3, 1))
        # Labels are 1-based on disk; shift to 0..999.
        self.labels = (np.array(labels) - 1).tolist()

    def __getitem__(self, index):
        """Return (transformed image, 0-based label, index) for `index`."""
        img, target = self.data[index], self.labels[index]
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        return img, target, index

    def __len__(self):
        """Number of samples loaded."""
        return len(self.data)


import numpy as np


def get_imb_num_per_cls(data, nb_classes: int, r_imb: float) -> list:
    """Compute per-class sample counts for an exponentially imbalanced split.

    imbalance type: 'exp': n_k = n_max * r_imb**(k / (K - 1)), where r_imb is
    the imbalance ratio and k the class index — class 0 keeps n_max samples
    and the last class keeps n_max * r_imb.

    Args:
        data: sized dataset; only ``len(data)`` is used.
        nb_classes: number of classes K (must be >= 1).
        r_imb: imbalance ratio (1.0 means balanced).

    Returns:
        List of K integer sample counts.

    Raises:
        ValueError: if nb_classes < 1.
    """
    if nb_classes < 1:
        raise ValueError('nb_classes must be >= 1, got %d' % nb_classes)
    max_num = len(data) / nb_classes
    if nb_classes == 1:
        # Single class: avoid the (nb_classes - 1) zero division; ratio is moot.
        return [int(max_num)]
    return [int(max_num * (r_imb ** (cls_idx / (nb_classes - 1.0))))
            for cls_idx in range(nb_classes)]


def gen_imb_data(data: np.ndarray, targets, num_per_cls, seed):
    """Subsample `data`/`targets` to the given per-class counts.

    Classes are taken in sorted (np.unique) order and zipped with
    `num_per_cls`; within each class a random subset of that size is kept.

    Args:
        data: array of samples, indexed along axis 0.
        targets: per-sample integer labels (parallel to `data`).
        num_per_cls: number of samples to keep for each class.
        seed: RNG seed for the per-class shuffles.

    Returns:
        (new_data, new_targets): stacked selected samples and their labels.
    """
    # Local RandomState keeps the selection reproducible without clobbering
    # the global numpy RNG state (same MT19937 stream as np.random.seed(seed)).
    rng = np.random.RandomState(seed)
    new_data, new_targets = [], []
    targets = np.array(targets, dtype=np.int32)
    classes = np.unique(targets)
    for cls, img_num in zip(classes, num_per_cls):
        idx = np.where(targets == cls)[0]
        rng.shuffle(idx)
        selected_idx = idx[:img_num]
        new_data.append(data[selected_idx, ...])
        new_targets.extend([cls, ] * img_num)

    return np.vstack(new_data), new_targets


def get_imb_data(data, targets, nb_classes, r_imb, seed):
    """Convenience wrapper: derive per-class counts, then subsample.

    Equivalent to get_imb_num_per_cls followed by gen_imb_data.
    """
    counts = get_imb_num_per_cls(data, nb_classes, r_imb)
    return gen_imb_data(data, targets, counts, seed)


import numpy as np
from numpy.testing import assert_array_almost_equal


def build_for_cifar100(size, noise):
    """Build a (size, size) row-stochastic label-transition matrix.

    Each class keeps its label with probability ``1 - noise`` and flips to
    the "next" class (circularly: the last class wraps to class 0) with
    probability ``noise``.

    Args:
        size: number of classes.
        noise: flip probability in [0, 1].

    Returns:
        np.ndarray transition matrix with rows summing to 1.

    Raises:
        ValueError: if noise is outside [0, 1].
    """
    # Raise instead of assert: asserts are stripped under `python -O`.
    if not (0.0 <= noise <= 1.0):
        raise ValueError('noise must be in [0, 1], got %r' % (noise,))

    P = (1. - noise) * np.eye(size)
    rows = np.arange(size)
    # Circular "next class" flip: superdiagonal plus the wrap-around entry.
    P[rows, (rows + 1) % size] = noise

    assert_array_almost_equal(P.sum(axis=1), 1, 1)
    return P


def noisify(y, p_minus, p_plus=None, random_state=0):
    """Randomly flip the signs of +/-1 labels.

    Labels equal to -1 flip with probability `p_minus`; labels equal to +1
    flip with probability `p_plus` (defaults to `p_minus`, i.e. symmetric
    noise when `p_plus` is not given).
    """
    assert np.all(np.abs(y) == 1)

    rng = np.random.RandomState(random_state)
    if p_plus is None:
        p_plus = p_minus

    noisy = y.copy()
    # One binomial draw per sample, in index order. Kept as a sequential
    # loop: vectorizing would change the RNG consumption pattern, and the
    # asymmetric case is awkward to express with a single coin toss batch.
    for idx in range(y.shape[0]):
        flip_prob = p_minus if y[idx] == -1 else p_plus
        if rng.binomial(n=1, p=flip_prob, size=1) == 1:
            noisy[idx] = -noisy[idx]

    return noisy


def multiclass_noisify(y, P, random_state=0):
    """Resample each label from the corresponding row of transition matrix P.

    `y` must hold integers between 0 and ``P.shape[0] - 1``; each label is
    replaced by a class drawn from the categorical distribution ``P[y_i]``.
    """
    assert P.shape[0] == P.shape[1]
    assert np.max(y) < P.shape[0]

    # P must be row stochastic with non-negative entries.
    assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))
    assert (P >= 0.0).all()

    y = np.array(y)
    rng = np.random.RandomState(random_state)
    noisy = y.copy()

    for pos, label in enumerate(y):
        # Single multinomial draw is a one-hot vector; argmax picks the class.
        one_hot = rng.multinomial(1, P[label, :], 1)[0]
        noisy[pos] = one_hot.argmax()

    return noisy


def noisify_cifar100_asymmetric(y_train, noise, random_state=None):
    """Inject asymmetric label noise for CIFAR-100.

    Mistakes stay inside the same superclass: the 100 classes are grouped
    into 20 contiguous blocks of 5 subclasses, and each block gets a
    circular "next class" flip matrix with flip probability `noise`
    (see build_for_cifar100).

    Args:
        y_train: array of labels in 0..99.
        noise: per-label flip probability; 0 returns `y_train` unchanged.
        random_state: seed passed to multiclass_noisify.

    Returns:
        The (possibly) noisified label array.
    """
    nb_classes = 100
    nb_superclasses = 20
    nb_subclasses = 5

    if noise <= 0.0:
        return y_train

    P = np.eye(nb_classes)
    for i in range(nb_superclasses):
        init, end = i * nb_subclasses, (i + 1) * nb_subclasses
        P[init:end, init:end] = build_for_cifar100(nb_subclasses, noise)

    y_train_noisy = multiclass_noisify(y_train, P=P,
                                       random_state=random_state)
    actual_noise = (y_train_noisy != y_train).mean()
    # With noise > 0 at least one label should have flipped; a zero rate
    # indicates a broken transition matrix.
    assert actual_noise > 0.0
    print('Actual noise %.2f' % actual_noise)
    return y_train_noisy
