import math
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms, datasets

from model import LeNet5

model = LeNet5(1, 10)  # 定义LeNet对象model

'''定义超参数'''
use_gpu = torch.cuda.is_available()  # gpu加速判断参数
batch = 128  # 训练集和测试集批处理数量
learning_rate = 1e-2  # 学习率
num_epoches = 100  # 训练代数

'''参数值初始化函数'''


def weight_init(m):
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.BatchNorm2d):
        m.weigth.data.fill_(1)
        m.bias.data.zero_()


'''定义数据集加载器'''
train_dataset = datasets.MNIST(
    root='../dataset/mnist/', train=True, transform=transforms.ToTensor(), download=True
)
test_dataset = datasets.MNIST(
    root='../dataset/mnist/', train=False, transform=transforms.ToTensor(), download=True
)

train_loader = DataLoader(train_dataset, batch_size=batch, shuffle=True)  # 加载训练集并进行打乱处理
test_loader = DataLoader(test_dataset, batch_size=batch, shuffle=False)  # 加载测试集

if use_gpu:
    model = model.cuda()
    print('use GPU')
else:
    print('use CPU')

'''定义loss和optimizer'''
criterion = nn.CrossEntropyLoss()  # 定义代价函数，使用交叉熵验证
optimizer = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.99))  # 定义优化器，使用Adam优化器优化
model.apply(weight_init)

'''开始训练与验证'''
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []
for epoch in range(num_epoches):
    print('epoch {}'.format(epoch + 1))
    print('-' * 25)
    train_loss = 0.0  # 定义该轮训练的损失函数
    train_acc = 0.0  # 定义该轮训练的准确率
    for i, data in enumerate(train_loader, 1):  # 遍历训练集的每一个数据
        img, label = data
        # 调用cuda
        if use_gpu:
            img = img.cuda()
            label = label.cuda()
        img = Variable(img)
        label = Variable(label)

        # 向前传播
        out = model(img)
        loss = criterion(out, label)  # 计算该批次的损失值
        train_loss += loss.item() * label.size(0)  # 记录该轮训练的总损失值
        _, pred = torch.max(out, 1)  # 输出每行最大值的索引
        num_correct = (pred == label).sum()  # 得到该批输出结果与label相匹配的数目
        train_acc += num_correct.item()  # 记录该轮训练输出结果与label相匹配的总数目
        # train_accuracy = (pred == label).float().mean()

        # 向后传播
        optimizer.zero_grad() # 优化器梯度清零
        loss.backward()
        optimizer.step() # 根据loss的梯度向后传播
    train_loss_list.append(train_loss / len(train_dataset))  # 记录每轮训练的损失值
    train_acc_list.append(train_acc / len(train_dataset))  # 记录每轮训练的准确率
    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
        epoch + 1, train_loss / (len(train_dataset)), train_acc / (len(train_dataset)))) # 输出每轮的训练损失值和准确率

    # 测试（验证）过程
    model.eval()
    eval_loss = 0.0
    eval_acc = 0.0
    for data in test_loader:
        img, label = data
        if use_gpu:
            img = Variable(img, volatile=True).cuda()
            label = Variable(label, volatile=True).cuda()
        else:
            img = Variable(img, volatile=True)
            label = Variable(label, volatile=True)
        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.item() * label.size(0)  # 记录该轮测试的总损失值
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()  # 得到该批输出结果与label相匹配的数目
        eval_acc += num_correct.item()  # 记录该轮测试输出结果与label相匹配的总数目
        # test_accuracy = (pred == label).float().mean() # 每批的准确率
    test_loss_list.append(eval_loss / len(test_dataset))
    test_acc_list.append(eval_acc / len(test_dataset))
    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len( # 输出每轮的测试损失值和准确率
        test_dataset)), eval_acc / (len(test_dataset))))
    print()

    '''当代数大于50时便保存pt模型文件'''
    if epoch >= 50:
        torch.save(model, './model_pt/epoch{}Loss{:.6f}Acc{:.6f}.pt'.format(epoch + 1, train_loss / len(train_dataset),
                                                                            train_acc / len(train_dataset)))

'''画出训练与测试的损失函数值与准确率曲线'''
x = np.arange(num_epoches)
plt.figure(1)
plt.plot(x, train_loss_list, marker='o', label='train', markevery=5)
plt.plot(x, test_loss_list, marker='s', label='test', markevery=5)
plt.xlabel("epochs")
plt.ylabel("loss")
plt.legend(loc='upper right')
plt.savefig('./loss.png')

plt.figure(2)
plt.plot(x, train_acc_list, marker='o', label='train', markevery=5)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=5)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.savefig('./accuracy.png')
