import matplotlib.pyplot as plt
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
from torch.utils import data

from 实验.Cutout import Cutout


class Residual(nn.Module):
    """Residual block: two 3x3 conv+BN layers with a skip connection.

    When ``use_1x1conv`` is True the shortcut is projected by a 1x1
    convolution (needed whenever the channel count or stride changes);
    otherwise the input is added through unchanged.
    """

    def __init__(self, input_channels, num_channels,
                 use_1x1conv=False, strides=1):
        super().__init__()
        # Main path: conv -> BN -> ReLU -> conv -> BN.
        self.conv1 = nn.Conv2d(input_channels, num_channels,
                               kernel_size=3, padding=1, stride=strides)
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.conv2 = nn.Conv2d(num_channels, num_channels,
                               kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(num_channels)
        # Shortcut path: optional 1x1 projection to match shape.
        self.conv3 = (nn.Conv2d(input_channels, num_channels,
                                kernel_size=1, stride=strides)
                      if use_1x1conv else None)

    def forward(self, X):
        out = F.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        shortcut = X if self.conv3 is None else self.conv3(X)
        return F.relu(out + shortcut)


def CustomResnet18(num_classes, input_channels=1):
    """Build a ResNet-18-style network for small (e.g. 32x32) images.

    The stem uses a 3x3 stride-1 convolution (no max-pool), followed by
    four stages of two residual blocks each (64/128/256/512 channels),
    global average pooling, and a final linear classifier.
    """
    def make_stage(in_ch, out_ch, num_residuals, first_block=False):
        # First residual of each non-initial stage halves the spatial
        # size and projects the shortcut with a 1x1 convolution.
        layers = []
        for i in range(num_residuals):
            if i == 0 and not first_block:
                layers.append(Residual(in_ch, out_ch,
                                       use_1x1conv=True, strides=2))
            else:
                layers.append(Residual(out_ch, out_ch))
        return nn.Sequential(*layers)

    stem = nn.Sequential(
        nn.Conv2d(input_channels, 64, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(64),
        nn.ReLU())

    return nn.Sequential(
        stem,
        make_stage(64, 64, 2, first_block=True),
        make_stage(64, 128, 2),
        make_stage(128, 256, 2),
        make_stage(256, 512, 2),
        nn.AdaptiveAvgPool2d((1, 1)),
        nn.Flatten(),
        nn.Linear(512, num_classes))


def load_cifar10(is_train, augs, batch_size):
    """Return a DataLoader over CIFAR-10 with the given transforms.

    Downloads the dataset into ``../data`` on first use; the training
    split is shuffled, the test split is not.
    """
    ds = torchvision.datasets.CIFAR10(
        root="../data", train=is_train, transform=augs, download=True)
    return torch.utils.data.DataLoader(
        ds, batch_size=batch_size, shuffle=is_train,
        num_workers=d2l.get_dataloader_workers())


def init_weights(m):
    """Xavier-initialize the weights of linear and convolutional layers.

    Intended for ``net.apply(init_weights)``; modules of any other type
    are left untouched.
    """
    # isinstance (rather than `type(m) in [...]`) is the idiomatic type
    # check and also covers subclasses of Linear/Conv2d.
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight)


# Training setup: batch size, all available GPUs (CPU fallback), and a
# ResNet-18 variant configured for 10 classes and 3-channel (RGB) input.
batch_size, devices, net = 128, d2l.try_all_gpus(), CustomResnet18(10, 3)
net.apply(init_weights)

# Training-time augmentation pipeline for CIFAR-10.
train_augs = torchvision.transforms.Compose([
    torchvision.transforms.Resize((32, 32)),
    # Pad the original 32x32 image with 4 zero pixels on each side
    # (40x40), then randomly crop it back to 32x32.
    torchvision.transforms.RandomCrop(32, padding=4),
    # Horizontally flip the image with probability 0.5.
    torchvision.transforms.RandomHorizontalFlip(),
    # Convert the image from a numpy array to the tensor PyTorch needs
    # for training.
    torchvision.transforms.ToTensor(),
    # Per-channel normalization; presumably CIFAR-10 statistics
    # precomputed by the author — TODO confirm against the dataset.
    torchvision.transforms.Normalize(mean=[0.49421427, 0.4851322, 0.45040995],
                                     std=[0.20199372, 0.19911827, 0.20113052]),
    # Randomly pick a region of the image and erase its pixels — mainly
    # used as data augmentation (kept here for reference, disabled).
    # torchvision.transforms.RandomErasing(),

    # Use Cutout to erase pixels as data augmentation.
    Cutout(n_holes=1, length=16),
    ])

# Test-time pipeline: deterministic resize plus the same per-channel
# normalization as training, with no random augmentation.
test_augs = torchvision.transforms.Compose([
    torchvision.transforms.Resize((32, 32)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.49421427, 0.4851322, 0.45040995],
                                     std=[0.20199372, 0.19911827, 0.20113052])
])


def train_with_data_aug(train_augs, test_augs, net, lr=0.001, num_epochs=100):
    """Train ``net`` on CIFAR-10 with the given augmentation pipelines.

    Parameters
    ----------
    train_augs, test_augs : transforms applied to the train/test splits.
    net : model to train (moved onto ``devices`` by ``d2l.train_ch13``).
    lr : Adam learning rate.
    num_epochs : number of training epochs (previously hard-coded to 100;
        the default preserves the original behavior).

    Uses the module-level ``batch_size`` and ``devices`` globals.
    """
    train_iter = load_cifar10(True, train_augs, batch_size)
    test_iter = load_cifar10(False, test_augs, batch_size)
    # Per-example losses ("none" reduction); train_ch13 reduces them itself.
    loss = nn.CrossEntropyLoss(reduction="none")
    trainer = torch.optim.Adam(net.parameters(), lr=lr)
    d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices)


# Kick off training, then display the d2l training-curve figure.
train_with_data_aug(train_augs, test_augs, net)
plt.show()
