import torch.nn as nn
import torch
import torchvision
import torchvision.transforms as T
from Models.densenet import densenet121
from Models.vgg import vgg19_bn
from Models.resnet import resnet50
import torch.optim as optim
from tqdm import tqdm
import matplotlib.pyplot as plt
import os

# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Model, optimizer, LR schedule, and loss consumed by train() as globals.
net = densenet121(pretrained=False)
net = net.to(device)
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
criterion = nn.CrossEntropyLoss()
epoch = 200  # total number of training epochs per model
torch.backends.cudnn.benchmark = False


def train(root):
    """Train the module-level ``net`` for ``epoch`` epochs and evaluate it
    on the test set after each epoch.

    Saves the checkpoint with the best test accuracy to
    ``./checkpoint/<root>.pth`` and writes a loss/accuracy plot to
    ``./<root>`` every 10 epochs.

    Args:
        root: Base name for the checkpoint file and the plot image.

    Uses module-level globals: net, optimizer, scheduler, criterion,
    device, epoch, train_loader, test_loader.
    """
    train_loss_list = []     # per-batch training loss (loss curve)
    train_correct_list = []  # per-epoch training accuracy (fraction in [0, 1])
    best_acc = 0             # best test accuracy so far, in percent

    for i in range(epoch):
        # Bug fix: originally printed the constant total `epoch` once before
        # the loop; print the current epoch index each iteration instead.
        print('\nEpoch: %d' % i)

        train_loss = 0
        correct = 0
        total = 0
        net.train()
        train_bar = tqdm(enumerate(train_loader))
        for batch_idx, (inputs, targets) in train_bar:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            train_bar.set_description(f'epoch {i} ,the loss is {loss.item():.4f} ')
            # loss.item() replaces the roundabout detach().cpu().numpy().item()
            train_loss_list.append(loss.item())
        acc = correct / total
        train_correct_list.append(acc)

        # ---- evaluate on the test set ----
        net.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(test_loader):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = net(inputs)
                loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

        # Save a checkpoint whenever test accuracy improves.
        acc = 100. * correct / total
        if acc > best_acc:
            print('Saving..')
            state = {
                'net': net.state_dict(),
                'acc': acc,
                'epoch': i,  # bug fix: was the constant `epoch` (total count)
            }
            os.makedirs('checkpoint', exist_ok=True)
            torch.save(state, './checkpoint/' + root + '.pth')
            best_acc = acc

        # Step the cosine LR schedule once per epoch.
        scheduler.step()

        # Refresh the diagnostic plot every 10 epochs. Close the figure
        # afterwards: the original created a new figure each time without
        # closing any, leaking one matplotlib figure per save.
        if i % 10 == 0:
            fig, (ax1, ax2) = plt.subplots(2, 1)
            ax1.plot(range(len(train_loss_list)), train_loss_list)
            ax2.plot(range(len(train_correct_list)), train_correct_list)
            fig.savefig('./' + root)
            plt.close(fig)


if __name__ == '__main__':

    # CIFAR-10 per-channel normalization statistics.
    transform = T.Compose([T.ToTensor(), T.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616))])
    data1 = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(data1, batch_size=100, shuffle=True, num_workers=2)
    data2 = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
    # No need to shuffle the evaluation set; order does not affect accuracy.
    test_loader = torch.utils.data.DataLoader(data2, batch_size=100, shuffle=False, num_workers=2)

    # densenet121 and its optimizer/scheduler were built at module level.
    train(root='densenet121')

    # Bug fix: the original only rebound `net`, leaving `optimizer` and
    # `scheduler` attached to densenet121's parameters — so resnet50 and
    # vgg19 were never actually optimized — and never moved the new models
    # to `device`. Rebuild the full training state for each model.
    net = resnet50(pretrained=False).to(device)
    optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    train(root='resnet50')

    net = vgg19_bn(pretrained=False).to(device)
    optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    train(root='vgg19')