import os
import torch
import torchvision
import platform


class Cutout:
    """Randomly zero out one square patch of a CHW image tensor (cutout augmentation)."""

    def __init__(self, size=16, p=0.5):
        # size: side length of the square patch; p: probability of applying it.
        self.size = size
        self.half_size = size // 2
        self.p = p

    def __call__(self, image):
        # Apply the patch with probability p, otherwise pass the image through.
        apply_patch = torch.rand([1]).item() <= self.p
        if not apply_patch:
            return image

        # Sample the patch's top-left corner; it may begin slightly outside the
        # image, so the visible region is clipped to the image bounds below.
        row0 = torch.randint(-self.half_size, image.size(1) - self.half_size, [1]).item()
        col0 = torch.randint(-self.half_size, image.size(2) - self.half_size, [1]).item()
        row1 = min(image.size(1), row0 + self.size)
        col1 = min(image.size(2), col0 + self.size)

        # Zero the (clipped) patch across all channels, in place.
        image[:, max(0, row0):row1, max(0, col0):col1] = 0
        return image


class CIFAR:
    """CIFAR-10/100 wrapper that builds train/test DataLoaders.

    The training pipeline applies random crop, horizontal flip, per-channel
    normalization (statistics computed from the training split), and Cutout.
    The test pipeline only converts to tensor and normalizes.
    """

    def __init__(self, num_classes, batch_size, threads=0, dataroot=r'/data'):
        """
        Args:
            num_classes: 100 selects CIFAR-100, any other value CIFAR-10.
            batch_size: batch size used by both loaders.
            threads: DataLoader worker count.
            dataroot: directory where the dataset is stored/downloaded.
        """
        self.desc = 'CIFAR{}_bz{}'.format(num_classes, batch_size)
        self.img_sz = 32
        self.num_classes = num_classes
        self.dataroot = dataroot
        self.dataset = torchvision.datasets.CIFAR100 if num_classes == 100 else torchvision.datasets.CIFAR10
        mean, std = self._get_statistics()
        # NOTE(review): Cutout runs after Normalize, so the patch is zero in
        # normalized space rather than the dataset mean — presumably intentional.
        train_transform = torchvision.transforms.Compose([
            torchvision.transforms.RandomCrop(size=(self.img_sz, self.img_sz), padding=4),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean, std),
            Cutout()
        ])

        test_transform = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean, std)
        ])

        train_set = self.dataset(
            root=self.dataroot,
            train=True,
            download=True,
            transform=train_transform)
        test_set = self.dataset(
            root=self.dataroot,
            train=False,
            download=True,
            transform=test_transform)

        self.trainloader = torch.utils.data.DataLoader(
            train_set,
            batch_size=batch_size,
            shuffle=True,
            num_workers=threads)
        self.testloader = torch.utils.data.DataLoader(
            test_set,
            batch_size=batch_size,
            shuffle=False,
            num_workers=threads)
        self.inchannel = 3

    def _get_statistics(self):
        """Return per-channel (mean, std) of the training images, values in [0, 1]."""
        train_set = self.dataset(
            root=self.dataroot,
            train=True,
            download=True,
            transform=torchvision.transforms.ToTensor())

        # CIFAR keeps its images as a uint8 (N, H, W, C) array in `.data`;
        # dividing by 255 yields exactly the values ToTensor() produces, so the
        # statistics match the previous implementation — without the Python
        # overhead of iterating a batch-size-1 DataLoader over 50k samples.
        data = torch.as_tensor(train_set.data, dtype=torch.float32).div_(255.0)
        return data.mean(dim=[0, 1, 2]), data.std(dim=[0, 1, 2])


class MyData:
    """Minimal base class for dataset wrappers.

    Subclasses overwrite the metadata fields and install real DataLoaders.
    """

    def __init__(self, batch_size=64):
        self.desc = f'MyData-bz{batch_size}'  # human-readable tag for logging
        self.img_sz = 32                      # default square image side
        self.num_classes = 10                 # default label count
        self.trainloader = None               # set by subclasses
        self.testloader = None                # set by subclasses


class CIFAR100(MyData):
    """CIFAR-100 loaders: crop/flip/resize-augmented train set, resized test set."""

    def __init__(self, batch_size=64, dataroot=r'/datahub/cifar100', image_size=32):
        """
        Args:
            batch_size: training batch size (the test loader is fixed at 256).
            dataroot: directory to store/download the dataset.
            image_size: final square image side after resizing.
        """
        super().__init__()
        self.desc = 'CIFAR100[bz{}iz{}]'.format(batch_size, image_size)
        self.img_sz = image_size
        self.num_classes = 100
        self.dataroot = dataroot

        # Per-channel normalization statistics for the CIFAR-100 training split.
        normalize = torchvision.transforms.Normalize(
            (0.507, 0.487, 0.441), (0.267, 0.256, 0.276))
        transform_train = torchvision.transforms.Compose([
            torchvision.transforms.RandomCrop(32, padding=4),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.Resize([image_size, image_size]),
            torchvision.transforms.ToTensor(),
            normalize,
        ])
        transform_test = torchvision.transforms.Compose([
            torchvision.transforms.Resize([image_size, image_size]),
            torchvision.transforms.ToTensor(),
            normalize,
        ])

        # Forked DataLoader workers are unreliable on Windows; stay single-process there.
        workers = 0 if platform.system() == 'Windows' else os.cpu_count() // 2

        train_data = torchvision.datasets.CIFAR100(
            self.dataroot,
            train=True,
            transform=transform_train,
            download=True)
        self.trainloader = torch.utils.data.DataLoader(
            train_data,
            batch_size=batch_size,
            shuffle=True,
            drop_last=False,
            num_workers=workers,
        )
        test_data = torchvision.datasets.CIFAR100(
            self.dataroot,
            train=False,
            transform=transform_test,
            download=False)
        self.testloader = torch.utils.data.DataLoader(
            test_data,
            batch_size=256,
            shuffle=False,
            num_workers=workers,
        )
        self.inchannels = 3


class CIFAR10(MyData):
    """CIFAR-10 loaders: crop/flip/resize-augmented train set, resized test set."""

    def __init__(self, batch_size=64, dataroot=r'/datahub/cifar10/', image_size=32):
        """
        Args:
            batch_size: training batch size (the test loader is fixed at 256).
            dataroot: directory holding the already-downloaded dataset
                (download=False below — the data must exist on disk).
            image_size: final square image side after resizing.
        """
        super(CIFAR10, self).__init__()
        self.desc = 'CIFAR10[bz{}iz{}]'.format(batch_size, image_size)
        self.img_sz = image_size
        self.num_classes = 10
        self.dataroot = dataroot
        # NOTE(review): these normalization constants match the CIFAR100 class
        # above (CIFAR-100 statistics), not the usual CIFAR-10 values
        # (~0.4914, 0.4822, 0.4465 / 0.2470, 0.2435, 0.2616). Left unchanged so
        # existing checkpoints keep working — confirm before retraining.
        transform_train = torchvision.transforms.Compose([
            torchvision.transforms.RandomCrop(32, padding=4),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.Resize([image_size, image_size]),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
        ])
        transform_test = torchvision.transforms.Compose([
            torchvision.transforms.Resize([image_size, image_size]),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.507, 0.487, 0.441), (0.267, 0.256, 0.276)),
        ])

        # Windows cannot fork DataLoader workers; stay single-process there.
        workers = 0 if platform.system() == 'Windows' else os.cpu_count() // 2

        train_data = torchvision.datasets.CIFAR10(
            self.dataroot,
            train=True,
            transform=transform_train,
            download=False)
        self.trainloader = torch.utils.data.DataLoader(
            train_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=workers)
        test_data = torchvision.datasets.CIFAR10(
            self.dataroot,
            train=False,
            transform=transform_test,
            download=False)
        # Bug fix: the evaluation loader used shuffle=True; evaluation order
        # should be deterministic (CIFAR100 above already uses shuffle=False).
        self.testloader = torch.utils.data.DataLoader(
            test_data,
            batch_size=256,
            shuffle=False,
            num_workers=workers)
        self.inchannels = 3

from torch.utils.data import DataLoader

class SVHN(MyData):
    """SVHN loaders: flip/rotate-augmented train set, resized test set."""

    def __init__(self, batch_size=64, dataroot=r'/datahub/svhn/', image_size=32):
        """
        Args:
            batch_size: training batch size (the test loader is fixed at 256).
            dataroot: directory to store/download the dataset.
            image_size: final square image side after resizing.
        """
        super(SVHN, self).__init__()
        self.desc = 'SVHN[bz{}iz{}]'.format(batch_size, image_size)
        self.dataroot = dataroot
        self.img_sz = image_size
        self.num_classes = 10
        # NOTE(review): these are CIFAR training-split statistics, not SVHN's —
        # presumably copy-pasted from the classes above; confirm before relying
        # on them.
        mean = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]
        std = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]
        # NOTE(review): RandomHorizontalFlip on house-number digits mirrors the
        # digits — verify this augmentation is wanted for SVHN. The leading
        # Resize+CenterCrop pair is redundant with the later Resize for square
        # 32x32 inputs; kept for behavior compatibility.
        transform_train = torchvision.transforms.Compose([
            torchvision.transforms.Resize(self.img_sz),
            torchvision.transforms.CenterCrop(self.img_sz),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.RandomRotation(15),  # data augmentation
            torchvision.transforms.Resize([image_size, image_size]),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean, std)
        ])
        transform_test = torchvision.transforms.Compose(
            [torchvision.transforms.Resize([image_size, image_size]),
             torchvision.transforms.ToTensor(),
             torchvision.transforms.Normalize(mean, std)])

        # Windows cannot fork DataLoader workers; stay single-process there.
        workers = 0 if platform.system() == 'Windows' else os.cpu_count() // 2

        train_data = torchvision.datasets.SVHN(
            self.dataroot,
            split='train',
            transform=transform_train,
            download=True)
        self.trainloader = torch.utils.data.DataLoader(
            train_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=workers)
        test_data = torchvision.datasets.SVHN(
            self.dataroot,
            split='test',
            transform=transform_test,
            download=True)
        # Bug fix: the evaluation loader used shuffle=True; evaluation order
        # should be deterministic (CIFAR100 above already uses shuffle=False).
        self.testloader = torch.utils.data.DataLoader(
            test_data,
            batch_size=256,
            shuffle=False,
            num_workers=workers)
        self.inchannels = 3


if __name__ == '__main__':
    # Smoke test: build the SVHN loaders and print each test batch's label shape.
    dataset = SVHN()
    for _, targets in dataset.testloader:
        print(targets.shape)
