import torch
import torchvision
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np


class DatasetSplit(Dataset):
    """A view onto ``dataset`` restricted to the samples listed in ``idxs``.

    Lets a subset of a larger dataset be used wherever a ``Dataset`` is
    expected (e.g. per-node training data in a federated split).
    """

    def __init__(self, dataset, idxs):
        self.dataset = dataset
        # Materialize the index collection so it supports len() and indexing.
        self.idxs = list(idxs)

    def __len__(self):
        # Size of the view, not of the underlying dataset.
        return len(self.idxs)

    def __getitem__(self, item):
        # Translate the view-local position into the underlying dataset index.
        sample, target = self.dataset[self.idxs[item]]
        return sample, target


class Data(object):
    """Load a torchvision dataset and split its training data across 5 nodes.

    Attributes set by ``__init__``:
        train_set:    the full training dataset.
        train_loader: non-IID (``args.iid == 0``) -> dict mapping node id to an
                      array of sample indices (from ``cifar10_noniid``);
                      IID -> list of 5 equally sized random ``Subset`` objects.
        test_loader:  ``DataLoader`` over the test set (no shuffling).

    ``args`` must provide ``dataset`` ('mnist' | 'cifar10' | 'cifar100'),
    ``iid`` (0 for non-IID) and ``batchsize``.
    """

    # Number of federated nodes the training set is partitioned across.
    _NUM_NODES = 5

    def __init__(self, args):
        # CIFAR-10 channel statistics, also applied to CIFAR-100 as in the
        # original code. NOTE(review): CIFAR-100 has its own canonical
        # mean/std; confirm whether reusing CIFAR-10's is intentional.
        cifar_norm = transforms.Normalize(
            (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
        )
        cifar_train_tf = transforms.Compose(
            [
                # Data augmentation: random crop with padding + horizontal flip.
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                cifar_norm,
            ]
        )
        cifar_val_tf = transforms.Compose([transforms.ToTensor(), cifar_norm])

        # Per-dataset configuration:
        # (dataset class, train transform, test transform, samples per node).
        if args.dataset == 'mnist':
            dataset_cls = torchvision.datasets.MNIST
            tra_transformer = transforms.Compose([transforms.ToTensor()])
            val_transformer = transforms.Compose([transforms.ToTensor()])
            per_node = 12000  # 60000 MNIST training images / 5 nodes
        elif args.dataset == 'cifar10':
            dataset_cls = torchvision.datasets.CIFAR10
            tra_transformer = cifar_train_tf
            val_transformer = cifar_val_tf
            per_node = 10000  # 50000 CIFAR training images / 5 nodes
        elif args.dataset == 'cifar100':
            dataset_cls = torchvision.datasets.CIFAR100
            tra_transformer = cifar_train_tf
            val_transformer = cifar_val_tf
            per_node = 10000
        else:
            # Previously an unknown name silently produced an object with no
            # attributes; fail fast instead.
            raise ValueError("unsupported dataset: %r" % (args.dataset,))

        self.train_set = dataset_cls(
            root="~/data", train=True, download=True, transform=tra_transformer
        )
        if args.iid == 0:  # non-IID: label-sorted shards per node
            # groups is a dict: node id -> array of training-sample indices.
            groups, rand_set_all = cifar10_noniid(self.train_set, self._NUM_NODES)
            self.train_loader = groups
        else:  # IID: equal-sized random subsets
            data_num = [per_node for _ in range(self._NUM_NODES)]
            splited_set = torch.utils.data.random_split(self.train_set, data_num)
            self.train_loader = splited_set

        test_set = dataset_cls(
            root="~/data", train=False, download=True, transform=val_transformer
        )
        self.test_loader = torch.utils.data.DataLoader(
            test_set, batch_size=args.batchsize, shuffle=False, num_workers=4
        )


def cifar10_noniid(dataset, num_users, num_shards=10, num_imgs=5000, train=True, rand_set_all=None):
    """Partition a dataset non-IID: each user receives whole label-sorted shards.

    Args:
        dataset: object with a ``targets`` attribute (per-sample labels) and a
            ``__len__``; e.g. a torchvision dataset.
        num_users: number of users to split the data across.
        num_shards: total number of shards; must be divisible by num_users.
        num_imgs: samples per shard (auto-adjusted to 6000 when the dataset
            has 60000 samples, i.e. MNIST-sized).
        train: unused; kept for interface compatibility with existing callers.
        rand_set_all: optional flat list of shard ids (``num_shards / num_users``
            entries per user) to replay a previous assignment; when None, a
            fresh random assignment is drawn.

    Returns:
        (dict_users, rand_set_all): mapping of user id -> array of sample
        indices, and the (possibly newly drawn) flat shard-assignment list.
    """
    # Bug fix: the original signature used a mutable default (rand_set_all=[]),
    # so shard choices leaked between successive calls and the second default
    # call silently took the replay branch. Use a None sentinel instead.
    if rand_set_all is None:
        rand_set_all = []

    if len(dataset) == 60000:  # MNIST-sized training set
        num_imgs = 6000
    assert num_shards % num_users == 0
    shard_per_user = num_shards // num_users  # shards assigned to each user

    idx_shard = [i for i in range(num_shards)]
    dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}
    idxs = np.arange(num_shards * num_imgs)
    labels = np.array(dataset.targets)

    # Shards must exactly tile the dataset.
    assert num_shards * num_imgs == len(labels)

    # Sort sample indices by label so each shard holds (mostly) one class.
    idxs_labels = np.vstack((idxs, labels))
    idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
    idxs = idxs_labels[0, :]

    # Divide and assign.
    if len(rand_set_all) == 0:
        # Fresh draw: each user takes shard_per_user distinct shards, without
        # replacement across users; the draw is recorded in rand_set_all.
        for i in range(num_users):
            rand_set = set(np.random.choice(idx_shard, shard_per_user, replace=False))
            rand_set_all.extend(rand_set)

            # Remove chosen shards from the pool available to later users.
            idx_shard = list(set(idx_shard) - rand_set)
            for rand in rand_set:
                dict_users[i] = np.concatenate((dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)

    else:
        # Replay a previous assignment; this only yields the same class mix if
        # this dataset's label distribution matches the one the assignment was
        # drawn from (e.g. train vs. validation split of the same data).
        for i in range(num_users):
            rand_set = rand_set_all[i * shard_per_user: (i + 1) * shard_per_user]
            for rand in rand_set:
                dict_users[i] = np.concatenate((dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0)

    return dict_users, rand_set_all
