import numpy as np
from matplotlib import pyplot as plt
from BoTNet import *
from data import *

if __name__ == '__main__':
    # Prefer the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('Using PyTorch version:', torch.__version__, ' Device:', device)

    # Per-channel normalization statistics (presumably the CIFAR-100 values —
    # confirm against data.py).
    mean = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]
    std = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]
    batch_size = 32

    train_loader = get_train_loader(mean, std, batch_size=batch_size, num_workers=2, shuffle=True)
    test_loader = get_test_loader(mean, std, batch_size=batch_size, num_workers=2, shuffle=True)

    train_exist = False       # resume from an existing checkpoint?
    last_train_only = False   # when resuming, fine-tune only the last stages?
    model = MyResNet().to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.8)
    epochs = 12
    if train_exist:
        # Restore model and optimizer state from the previous run.
        checkpoint = torch.load('checkpoint.pth')
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        if last_train_only:
            # Freeze everything, then re-enable layer4 and the classifier head.
            for param in model.parameters():
                param.requires_grad = False
            for block in (model.layer4, model.fc):
                for param in block.parameters():
                    param.requires_grad = True
        else:
            for param in model.parameters():
                param.requires_grad = True
        # Resume with a lower learning rate.
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.005
    criterion = nn.CrossEntropyLoss()


def accuracy(output, target, topk=(1,)):
    """Percentage of samples whose true label appears in the top-k predictions.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of true class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of one-element tensors, one per k, each holding the top-k
        accuracy as a percentage of the batch.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)

        # (batch, k_max) indices of the highest-scoring classes, transposed so
        # row r holds every sample's rank-r prediction.
        _, top_indices = output.topk(k_max, 1, True, True)
        preds = top_indices.t()
        hits = preds.eq(target.view(1, -1).expand_as(preds))

        # For each k, count hits anywhere in the first k rows and scale to %.
        return [hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n_samples)
                for k in topk]


def train(epoch, log_interval=200, train_loss_vector=None):
    """Run one training epoch over ``train_loader``.

    Relies on the module-level ``model``, ``optimizer``, ``criterion``,
    ``train_loader`` and ``device`` created in the ``__main__`` block.

    Args:
        epoch: 1-based epoch number (used only for logging).
        log_interval: print progress every ``log_interval`` batches.
        train_loss_vector: optional list; the final batch's loss is appended.
    """
    model.train()
    loss = None  # last batch loss; stays None if the loader yields no batches
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        # Clip the gradient norm to stabilize training.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=20, norm_type=2)
        optimizer.step()  # w - alpha * dL / dw
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))
    # Guard against an empty loader, which would otherwise raise NameError here.
    if train_loss_vector is not None and loss is not None:
        train_loss_vector.append(loss.item())


def validate(loss_vector, top1_acc_vector, top5_acc_vector):
    """Evaluate on ``test_loader`` and append loss/accuracy to the given lists.

    Relies on the module-level ``model``, ``criterion``, ``test_loader`` and
    ``device`` created in the ``__main__`` block.

    Args:
        loss_vector: receives the mean per-batch validation loss.
        top1_acc_vector: receives the mean top-1 accuracy (percent).
        top5_acc_vector: receives the mean top-5 accuracy (percent).
    """
    model.eval()
    val_loss, correct1, correct5 = 0, 0, 0
    # model.eval() alone does not disable autograd; no_grad skips graph
    # construction during inference, saving memory and time.
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            val_loss += criterion(output, target).item()

            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            correct1 += acc1
            correct5 += acc5

    # Average over batches (per-batch metrics are already percentages).
    val_loss /= len(test_loader)
    loss_vector.append(val_loss)

    accuracy1 = correct1 / len(test_loader)
    accuracy5 = correct5 / len(test_loader)
    top1_acc_vector.append(accuracy1.item())
    top5_acc_vector.append(accuracy5.item())

    print('\nValidation set: Average loss: {:.4f}, Top1 Accuracy: {:.2f}%, Top5 Accuracy: {:.2f}%\n'.format(
        val_loss, accuracy1.item(), accuracy5.item()))


if __name__ == '__main__':
    train_lossv, val_lossv, top1_accv, top5_accv = [], [], [], []
    if train_exist:
        # Resume the metric history so the plots continue from the prior run.
        checkpoint = torch.load('checkpoint.pth')
        train_lossv = checkpoint['train_loss']
        val_lossv = checkpoint['val_loss']
        top1_accv = checkpoint['top1_acc']
        top5_accv = checkpoint['top5_acc']
        # Print the restored histories themselves; the original printed
        # enumerate() objects, which only shows their repr.
        print(train_lossv)
        print(val_lossv)
        print(top1_accv)
        print(top5_accv)
    for epoch in range(1, epochs + 1):
        train(epoch, train_loss_vector=train_lossv)
        validate(val_lossv, top1_accv, top5_accv)

    # Loss curves (train vs. validation), written to disk.
    plt.figure(figsize=(5, 3))
    plt.plot(np.arange(1, len(train_lossv) + 1), train_lossv, label='Train Loss')
    plt.plot(np.arange(1, len(val_lossv) + 1), val_lossv, label='Validation Loss')
    plt.title('Loss')
    plt.legend()
    plt.savefig('loss_img.png')

    # Validation accuracy curves (top-1 and top-5).
    plt.figure(figsize=(5, 3))
    plt.plot(np.arange(1, len(top1_accv) + 1), top1_accv, label='Top1 Accuracy')
    plt.plot(np.arange(1, len(top5_accv) + 1), top5_accv, label='Top5 Accuracy')
    plt.title('Validation Accuracy')
    plt.legend()
    plt.savefig('accuracy_img.png')

    # Persist weights, optimizer state and metric history for resuming later.
    torch.save({
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch,
        'train_loss': train_lossv,
        'val_loss': val_lossv,
        'top1_acc': top1_accv,
        'top5_acc': top5_accv
    }, 'checkpoint.pth')
