import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
from torch.utils import data
from torch.optim import lr_scheduler

from 实验.Cutout import Cutout

"""
图像增广办法
train_augs = torchvision.transforms.Compose([   
    # 在高度和宽度上将图像放大到40像素的正方形
    # torchvision.transforms.Resize(40),
    # 随机裁剪出一个高度和宽度均为40像素的正方形图像，
    # 生成一个面积为原始图像面积0.64到1倍的小正方形，
    # 然后将其缩放为高度和宽度均为32像素的正方形
    torchvision.transforms.Resize(40),  # 在高度和宽度上将图像放大到40像素的正方形
    torchvision.transforms.Pad(4),  # 对图片进行边界零填充
    torchvision.transforms.Scale(40),  # 对图片的尺寸进行缩小或者放大（已弃用，新版 torchvision 请改用 Resize）
    torchvision.transforms.RandomRotation(0.5),  # 对图像进行角度旋转，0.5代表在(-0.5, +0.5)范围内进行旋转，可以自行设置
    torchvision.transforms.RandomGrayscale(),   # 随机对图像转换成灰度图，默认为50%
    torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),  # 修改亮度、对比度和饱和度
    torchvision.transforms.RandomResizedCrop(32, scale=(0.1, 1), ratio=(0.5, 2)),  # 首先对图片进行随机尺寸的裁剪，然后再对裁剪的图片进行一个随机比例的缩放，最后将图片变成给定的大小
    torchvision.transforms.RandomCrop(32, padding=4),  # 对原始 32*32 图像四周各填充4个0像素（40*40），然后随机裁剪成32*32
    torchvision.transforms.RandomRotation(degrees=15),  # 随机旋转
    torchvision.transforms.RandomHorizontalFlip(),  # 按0.5的概率水平翻转图片
    torchvision.transforms.CenterCrop(size=32),  # 对图像正中心进行给定大小的裁剪
    torchvision.transforms.ToTensor(),  # 将图像从numpy的array转化为pytorch训练需要的tensor
    torchvision.transforms.Normalize(mean=(0.35561267, 0.35555243, 0.35479292), std=(0.24817333, 0.24799584, 0.2476249)),
    # 随机选择图像中的一块区域，擦除其像素，主要用来进行数据增强。
    torchvision.transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False)
])
"""


def CustomResnet18(num_classes, input_channels=1):
    """Build a ResNet-18-style network from d2l Residual blocks for small images.

    Args:
        num_classes: size of the final classification layer.
        input_channels: number of channels in the input images (default 1).

    Returns:
        An ``nn.Sequential`` network ending in a ``num_classes``-way linear head.
    """

    def make_stage(in_channels, out_channels, depth, first_block=False):
        # The first residual of every stage (except the very first) halves the
        # spatial size and widens the channels via a 1x1 projection shortcut.
        stage = []
        for idx in range(depth):
            if idx == 0 and not first_block:
                stage.append(d2l.Residual(in_channels, out_channels,
                                          use_1x1conv=True, strides=2))
            else:
                stage.append(d2l.Residual(out_channels, out_channels))
        return stage

    # Stem: a 3x3 stride-1 conv keeps the 32x32 resolution
    # (no 7x7/stride-2 stem as in the ImageNet ResNet).
    stem = nn.Sequential(
        nn.Conv2d(input_channels, 64, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(64),
        nn.ReLU(),
    )

    stage1 = nn.Sequential(*make_stage(64, 64, 2, first_block=True))
    stage2 = nn.Sequential(*make_stage(64, 128, 2))
    stage3 = nn.Sequential(*make_stage(128, 256, 2))
    stage4 = nn.Sequential(*make_stage(256, 512, 2))

    return nn.Sequential(
        stem, stage1, stage2, stage3, stage4,
        nn.AdaptiveAvgPool2d((1, 1)),
        nn.Flatten(),
        nn.Linear(512, num_classes),
    )


def load_cifar10(is_train, augs, batch_size):
    """Return a DataLoader over CIFAR-10 with the given transform pipeline.

    Args:
        is_train: True for the training split (also enables shuffling).
        augs: torchvision transform applied to every image.
        batch_size: mini-batch size.

    Returns:
        A ``torch.utils.data.DataLoader`` over the requested split; the
        dataset is downloaded to ``../data`` if not already present.
    """
    split = torchvision.datasets.CIFAR10(
        root="../data", train=is_train, transform=augs, download=True)
    return torch.utils.data.DataLoader(
        split,
        batch_size=batch_size,
        shuffle=is_train,  # shuffle only while training
        num_workers=d2l.get_dataloader_workers(),
    )


def init_weights(m):
    """Xavier-initialise the weights of linear and convolutional layers.

    Intended for use with ``net.apply(init_weights)``; any other module type
    (BatchNorm, activations, containers) is left at its default initialisation.
    """
    # isinstance (rather than the `type(m) in [...]` identity check) is the
    # idiomatic type test and also covers subclasses of Linear/Conv2d.
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight)


def train(net, train_iter, test_iter, num_epochs, loss, trainer, device, scheduler=None):
    """Train ``net`` on ``device``, plotting loss/accuracy curves with d2l.

    Args:
        net: model to train (moved onto ``device`` first).
        train_iter: training DataLoader.
        test_iter: evaluation DataLoader.
        num_epochs: number of passes over the training data.
        loss: loss function (e.g. ``nn.CrossEntropyLoss``).
        trainer: optimizer whose param groups may be rescheduled.
        device: computation device.
        scheduler: optional learning-rate scheduler — either an object from
            ``torch.optim.lr_scheduler`` (stepped once per epoch) or a plain
            callable mapping epoch -> learning rate.
    """
    net.to(device)
    animator = d2l.Animator(xlabel='epoch', xlim=[0, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])

    for epoch in range(num_epochs):
        # Accumulates: summed loss, number of correct predictions, sample count.
        metric = d2l.Accumulator(3)
        for batch_idx, (X, y) in enumerate(train_iter):
            # Re-enter train mode every batch (eval mode is set once per
            # epoch by evaluate_accuracy_gpu below).
            net.train()
            trainer.zero_grad()
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            batch_loss = loss(y_hat, y)
            batch_loss.backward()
            trainer.step()
            with torch.no_grad():
                metric.add(batch_loss * X.shape[0],
                           d2l.accuracy(y_hat, y), X.shape[0])
            train_loss = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            # Refresh the curves every 50 mini-batches.
            if (batch_idx + 1) % 50 == 0:
                animator.add(epoch + batch_idx / len(train_iter),
                             (train_loss, train_acc, None))

        test_acc = d2l.evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))

        if scheduler:
            if scheduler.__module__ == lr_scheduler.__name__:
                # Built-in PyTorch scheduler: advance it once per epoch.
                scheduler.step()
            else:
                # Custom callable scheduler: set the LR on every param group.
                for group in trainer.param_groups:
                    group['lr'] = scheduler(epoch)

    print(f'train loss {train_loss:.3f}, train acc {train_acc:.3f}, test acc {test_acc:.3f}')


# Training-time pipeline: pad-and-crop plus horizontal flip for geometric
# augmentation, per-channel CIFAR-10 normalisation, then random erasing.
train_augs = torchvision.transforms.Compose([
    # Zero-pad the 32x32 image by 4 pixels on each side, then crop back to 32x32.
    torchvision.transforms.RandomCrop(32, padding=4),
    # Mirror the image left-right with probability 0.5.
    torchvision.transforms.RandomHorizontalFlip(),
    # PIL image -> float tensor scaled into [0, 1].
    torchvision.transforms.ToTensor(),
    # Per-channel CIFAR-10 statistics, expressed on the 0-255 scale.
    torchvision.transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x / 255.0 for x in [63.0, 62.1, 66.7]]),
    # Randomly blank out a rectangular patch (cutout-style regularisation).
    torchvision.transforms.RandomErasing(),
])

# Evaluation-time pipeline: no augmentation, but the normalisation MUST use
# the same statistics as train_augs.  The original single-channel
# Normalize(mean=(0.286,), std=(0.352,)) disagreed with the per-channel
# CIFAR-10 stats used during training, which skews test accuracy.
test_augs = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
])


# Hyper-parameters and model; CIFAR-10 has 10 classes and 3-channel images.
batch_size = 128
devices = d2l.try_gpu()
net = CustomResnet18(10, 3)
num_epochs = 100
print(devices)
net.apply(init_weights)  # Xavier-init every conv/linear layer

#
# class PolynomialScheduler:
#     def __init__(self, lr=0.1):
#         self.lr = lr
#
#     def __call__(self, num_update):
#         return pow(self.lr * num_update + 1.0, -0.5)


def train_with_data_aug(train_augs, test_augs, net, lr=0.1):
    """Train ``net`` on CIFAR-10 with the given augmentation pipelines.

    Args:
        train_augs: torchvision transform for the training split.
        test_augs: torchvision transform for the test split.
        net: the model (trained in place; uses module-level batch_size,
            num_epochs and devices).
        lr: base learning rate for SGD.
    """
    train_iter = load_cifar10(True, train_augs, batch_size)
    test_iter = load_cifar10(False, test_augs, batch_size)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=lr,
                                momentum=0.9, weight_decay=5e-4)
    # Polynomial decay: multiply the base LR by (epoch + 1) ** -0.5.
    schedule = lr_scheduler.LambdaLR(
        optimizer, lr_lambda=lambda num_update: pow(num_update + 1.0, -0.5))
    train(net, train_iter, test_iter, num_epochs, criterion,
          optimizer, devices, schedule)


# Kick off training with data augmentation, then keep the final d2l plot open.
train_with_data_aug(train_augs, test_augs, net)
plt.show()


# scheduler = lr_scheduler.MultiStepLR(trainer, milestones=[], gamma=0.5)
