import os
import torch
import torchvision
from torch import nn, optim
import time
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import _LRScheduler

"""
获取数据集 直接使用pytorch-cifar100 里实现的
cifar100 数据集放到 ../data 中
"""


def get_training_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):
    """Build a DataLoader over the CIFAR-100 training split.

    Args:
        mean: per-channel mean of the CIFAR-100 training set (for Normalize)
        std: per-channel std of the CIFAR-100 training set (for Normalize)
        batch_size: number of samples per batch
        num_workers: number of DataLoader worker processes
        shuffle: whether to reshuffle the samples each epoch
    Returns:
        torch DataLoader yielding augmented, normalized training batches
    """
    # Standard CIFAR augmentation pipeline: crop, flip, small rotation,
    # then tensor conversion and normalization.
    augmentations = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    train_set = torchvision.datasets.CIFAR100(
        root='../data', train=True, download=True, transform=augmentations)
    return DataLoader(train_set, shuffle=shuffle,
                      num_workers=num_workers, batch_size=batch_size)


def get_test_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):
    """Return a DataLoader over the CIFAR-100 *test* split.

    (Fixes the original docstring, which wrongly said "training dataloader".)

    Args:
        mean: per-channel mean used for normalization (conventionally the
            training-set statistics)
        std: per-channel std used for normalization
        batch_size: number of samples per batch
        num_workers: number of DataLoader worker processes
        shuffle: whether to shuffle the samples (not needed for evaluation)
    Returns:
        cifar100_test_loader: torch DataLoader object
    """
    # Test-time pipeline: no augmentation, only tensor conversion + normalize.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    cifar100_test = torchvision.datasets.CIFAR100(root='../data', train=False, download=True, transform=transform_test)
    cifar100_test_loader = DataLoader(
        cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)

    return cifar100_test_loader


"""
定义LeNet-5的模型
LeNet-5 模型本来是处理 黑白图像的 只有一个通道
现在处理cifar100 通道数是3 
"""
"""
LeNet1
目前的修改：
既然通道数是3  那么参数空间也对应变为原来的三倍试试看
结果：不能收敛
"""
"""
修改优化器后 跑LeNet可以收敛了
优化方法：SGD+动量
跑到大概80 epoch就收敛了 正确率大概在30-34%之间
epoch 97, train_loss_avg 0.0223, train_acc_avg 0.2829, test_acc 0.3197, time 12.5550
Evaluating Network.....
Test set: Epoch: 97, Average loss: 0.0207, Accuracy: 0.3322, Time consumed:1.44s
"""
"""
增加优化器后 再跑LeNet
Test set: Epoch: 201, Average loss: 0.0194, Accuracy: 0.3753, Time consumed:1.45s
还能稍微高一点

单纯的增加宽度不行--拓宽的LeNet
增加深度效果更好--AlexNet
深度越深 组合出的feature的level更高 更有抽象表征能力
"""

class LeNet(nn.Module):
    """LeNet-5 adapted for 3-channel 32x32 inputs (e.g. CIFAR-100).

    The classic LeNet-5 takes single-channel images; here every layer width
    is tripled to match the 3 input channels (see the notes above this class).
    """

    def __init__(self, num_classes=100):
        """
        Args:
            num_classes: size of the final classification layer. Defaults to
                100 (CIFAR-100); parameterized so the model can be reused on
                other datasets without editing the class.
        """
        super(LeNet, self).__init__()

        self.conv = nn.Sequential(
            # 3 x 32 x 32 -> 18 x 28 x 28 -> 18 x 14 x 14
            nn.Conv2d(in_channels=1 * 3, out_channels=6 * 3, kernel_size=5),
            nn.Sigmoid(),
            nn.MaxPool2d(kernel_size=2, stride=2),

            # 18 x 14 x 14 -> 48 x 10 x 10 -> 48 x 5 x 5
            nn.Conv2d(in_channels=6 * 3, out_channels=16 * 3, kernel_size=5),
            nn.Sigmoid(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

        self.fc = nn.Sequential(
            nn.Linear(in_features=16 * 5 * 5 * 3, out_features=120 * 3),
            nn.Sigmoid(),
            nn.Linear(in_features=120 * 3, out_features=84 * 3),
            nn.Sigmoid(),
            nn.Linear(in_features=84 * 3, out_features=num_classes)
        )

    def forward(self, imgs):
        """Run a batch of images (batch, 3, 32, 32) through the network."""
        feature = self.conv(imgs)
        # imgs is (batch, channel, height, width); flatten each sample's
        # feature map to a 1-D vector before the fully-connected head.
        output = self.fc(feature.view(imgs.shape[0], -1))
        return output


"""
换个模型测试一下 看看 训练 和 评估 的函数能不能正常工作
优化方法：SGD+动量
大概到150 epoch的时候 网络就不再收敛了 但是只有35-40%左右的正确率
epoch 155, train_loss_avg 0.0208, train_acc_avg 0.3362, test_acc 0.3594, time 14.9374
Evaluating Network.....
Test set: Epoch: 155, Average loss: 0.0199, Accuracy: 0.3719, Time consumed:1.53s
"""
"""
把AlexNet嵌入到pytorch-cifar100 项目的代码中 跑跑看 能有多少的正确率
Test set: Epoch: 200, Average loss: 0.0161, Accuracy: 0.5640, Time consumed:1.54s
Top 1 err:  tensor(0.4314)
和vgg13一样都是 比自己写的train函数训练的准确度要高 为什么？

"""


class AlexNet(nn.Module):
    """AlexNet-style network scaled down for 32x32 inputs (CIFAR-100).

    Args:
        classes: number of output classes (default 100).
    """

    def __init__(self, classes=100):
        super().__init__()
        # Five conv layers with three interleaved max-pools:
        # 3x32x32 -> 64x16x16 -> 64x7x7 -> 192x7x7 -> 192x3x3
        # -> 384x3x3 -> 256x3x3 -> 256x3x3 -> 256x1x1
        feature_layers = [
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*feature_layers)

        # Dropout-regularized fully-connected classifier head.
        head_layers = [
            nn.Dropout(),
            nn.Linear(256 * 1 * 1, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, classes),
        ]
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, x):
        """Extract features, flatten per sample, and classify."""
        feats = self.features(x)
        flat = torch.flatten(feats, 1)
        return self.classifier(flat)


"""
试一试vgg13 看看能到多少
优化方法：SGD+动量
前90个epoch的正确率快速上升 上升到47%左右
后面一直到200 epoch就稳定下来了  正确率大概在50-53%之间
epoch 210, train_loss_avg 0.0148, train_acc_avg 0.5037, test_acc 0.5021, time 16.4458s
Evaluating Network.....
Test set: Epoch: 210, Average loss: 0.0143, Accuracy: 0.5272, Time consumed:1.77s

用github上cifar100的项目跑跑看
vgg13
大概70 epoch就稳定了 正确率在63-65%
为什么会有这么大的差距呢？ 优化方法都是一样
"""
"""
vgg16 用GitHub上的项目跑 其top-1 err是0.2794 对应的正确率是 0.7206

试一试vgg16 在这能跑到多少
Test set: Epoch: 200, Average loss: 0.0171, Accuracy: 0.4402, Time consumed:1.60s
很奇怪 跑完200 epoch 也才44%左右的正确率 
不知道问题出在哪
"""
"""
离谱
稍微改动一下，结果就上去了。。。。
vgg16在自己的train函数上也能跑到 和GitHub项目一样的水平
Test set: Epoch: 201, Average loss: 0.0129, Accuracy: 0.7192, Time consumed:1.63s

主要的改动
if epoch > 1:
    train_scheduler.step(epoch)

net.train()

if epoch <= 1:
    warmup_scheduler.step()


optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES,
                                                 gamma=0.2)  # learning rate decay
iter_per_epoch = len(cifar100_training_loader)
warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * int(1))
单纯的optimizer效果不行
加入train_scheduler和warmup_scheduler正确率能提升10个点 简直离谱
"""
# VGG layer configurations: each int is a 3x3 conv's output-channel count,
# 'M' marks a 2x2 max-pool. Keys follow the original VGG paper's tables:
# 'A' -> VGG-11, 'B' -> VGG-13, 'D' -> VGG-16, 'E' -> VGG-19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
}


class VGG(nn.Module):
    """Generic VGG: a conv feature extractor plus a 3-layer FC classifier.

    Args:
        features: module producing a 512-element feature map per sample
            (typically built by make_layers).
        num_class: number of output classes (default 100).
    """

    def __init__(self, features, num_class=100):
        super().__init__()
        self.features = features

        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_class)
        )

    def forward(self, x):
        """Extract features, flatten, and classify a batch."""
        feats = self.features(x)
        # Collapse (batch, channel, height, width) to (batch, features);
        # dim 0 is the batch size.
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)


def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into a Sequential feature extractor.

    Each int entry becomes a 3x3 same-padding conv (optionally followed by
    BatchNorm) plus ReLU; each 'M' entry becomes a 2x2 max-pool.

    Args:
        cfg: list of ints and 'M' markers (see the cfg dict).
        batch_norm: insert BatchNorm2d after every conv when True.
    Returns:
        nn.Sequential of the assembled layers.
    """
    modules = []
    in_ch = 3  # RGB input
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
        else:
            modules.append(nn.Conv2d(in_ch, spec, kernel_size=3, padding=1))
            if batch_norm:
                modules.append(nn.BatchNorm2d(spec))
            modules.append(nn.ReLU(inplace=True))
            in_ch = spec
    return nn.Sequential(*modules)


def vgg11_bn():
    """Build VGG-11 with batch normalization (config 'A')."""
    features = make_layers(cfg['A'], batch_norm=True)
    return VGG(features)


def vgg13_bn():
    """Build VGG-13 with batch normalization (config 'B')."""
    features = make_layers(cfg['B'], batch_norm=True)
    return VGG(features)


def vgg16_bn():
    """Build VGG-16 with batch normalization (config 'D')."""
    features = make_layers(cfg['D'], batch_norm=True)
    return VGG(features)


def vgg19_bn():
    """Build VGG-19 with batch normalization (config 'E')."""
    features = make_layers(cfg['E'], batch_norm=True)
    return VGG(features)


"""
训练过程中对模型进行评估
"""


@torch.no_grad()
def eval_training(net, test_iter, loss_function, epoch=0):
    """Evaluate `net` on the test set and print/return the accuracy.

    Args:
        net: model to evaluate (switched to eval mode; not switched back).
        test_iter: DataLoader over the test set.
        loss_function: criterion mapping (outputs, labels) to a scalar loss.
        epoch: epoch number being evaluated, only used in the printed report.

    Returns:
        Test-set accuracy as a 0-dim float tensor (correct / dataset size).
    """
    # Run on whatever device the model already lives on instead of
    # unconditionally calling .cuda(); this makes CPU-only machines work.
    device = next(net.parameters()).device
    start = time.time()
    net.eval()

    test_loss = 0.0  # sum of per-batch losses
    correct = 0.0    # number of correctly classified samples

    # The @torch.no_grad() decorator already disables gradients, so the
    # original redundant inner `with torch.no_grad():` block is removed.
    for (images, labels) in test_iter:
        images = images.to(device)
        labels = labels.to(device)

        outputs = net(images)
        loss = loss_function(outputs, labels)

        test_loss += loss.item()
        _, preds = outputs.max(1)
        correct += preds.eq(labels).sum()

    finish = time.time()

    print('Test set: Epoch: {}, Average loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        epoch,
        test_loss / len(test_iter.dataset),
        correct.float() / len(test_iter.dataset),
        finish - start
    ))
    print()

    return correct.float() / len(test_iter.dataset)


"""
训练模型
"""


def train_model(net, train_iter, test_iter, loss_function, optimizer, device, num_epochs):
    """Train `net` with mini-batch SGD and evaluate after every epoch.

    Args:
        net: network to train.
        train_iter: DataLoader over the training set.
        test_iter: DataLoader over the test set.
        loss_function: criterion mapping (output, target) to a scalar loss.
        optimizer: optimization algorithm (e.g. SGD with momentum).
        device: torch.device to train on.
        num_epochs: number of epochs to run.

    NOTE(review): this function reads the module-level globals
    `train_scheduler` and `warmup_scheduler`; they must be defined before
    calling it (see the __main__ section). Passing them as parameters
    would decouple it from the script — left as-is to avoid breaking the
    existing implicit contract.
    """
    net = net.to(device)  # move the model to the target device

    print("training on:", device)

    for epoch in range(1, num_epochs + 1):

        # After the warm-up epoch, apply the milestone LR decay schedule.
        if epoch > 1:
            train_scheduler.step(epoch)

        net.train()

        for X, y in train_iter:
            # Use the `device` parameter instead of the original hard-coded
            # .cuda() calls, so training also works on CPU-only machines.
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)

            optimizer.zero_grad()
            train_loss = loss_function(y_hat, y)
            train_loss.backward()
            optimizer.step()

            # During the first epoch, ramp the LR up linearly per batch.
            if epoch <= 1:
                warmup_scheduler.step()

        # NOTE(review): epoch + 1 is reported while the loop counts from 1,
        # so the printed epoch runs one ahead; kept to match the training
        # logs quoted in the comments earlier in this file.
        eval_training(net=net, test_iter=test_iter, loss_function=loss_function, epoch=epoch + 1)


class WarmUpLR(_LRScheduler):
    """Linear learning-rate warm-up scheduler.

    Scales each parameter group's base LR by last_epoch / total_iters, so
    the LR ramps linearly from 0 up to base_lr over the warm-up phase
    (one .step() per training batch).

    Args:
        optimizer: wrapped optimizer (e.g. SGD).
        total_iters: total number of warm-up iterations (batches).
        last_epoch: index of the last iteration; -1 starts from scratch.
    """

    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return base_lr * last_epoch / total_iters for each param group."""
        # The tiny epsilon guards against division by zero when
        # total_iters is 0.
        scale = self.last_epoch / (self.total_iters + 1e-8)
        return [scale * base_lr for base_lr in self.base_lrs]


if __name__ == '__main__':
    class settings:
        CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
        CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
        batch_size = 128
        MILESTONES = [60, 120, 160]


    # data preprocessing:
    cifar100_training_loader = get_training_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=settings.batch_size,
        shuffle=True
    )

    cifar100_test_loader = get_test_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=settings.batch_size,
        shuffle=True
    )

    net = LeNet()

    lr, num_epochs = 0.1, 200
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES,
                                                     gamma=0.2)  # learning rate decay
    iter_per_epoch = len(cifar100_training_loader)
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * int(1))
    """
    bug1：
    原先的的优化器是
    lr, num_epochs = 0.001, 5
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    
    结果 并不能收敛
    
    网络能不能收敛 这个优化方法 也挺关键的
    """
    loss_function = nn.CrossEntropyLoss()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_model(net=net, train_iter=cifar100_training_loader, test_iter=cifar100_test_loader,
                loss_function=loss_function,
                optimizer=optimizer, device=device, num_epochs=num_epochs)
