import torch
import torch.nn as nn
import torch.optim as optim
import torchvision

from config import Config
from model import LeNet
from tools import getTransform, getDataSet, imshow, DataPrinter

transform = getTransform()

trainset, trainloader, testset, testloader = getDataSet(Config.CONFIG_ROOT_PATH, Config.CONFIG_BATCH_SIZE, transform)

# Randomly sample one batch of images for visual inspection.
dataiter = iter(trainloader)
# BUG FIX: `dataiter.next()` was removed from DataLoader iterators in
# PyTorch >= 1.13; use the builtin next() on the iterator instead.
images, labels = next(dataiter)
imshow(torchvision.utils.make_grid(images))
# Print the class name of each image in the sampled batch.
print(' '.join(f'{Config.CONFIG_CLASSES[labels[j]]:5s}' for j in range(Config.CONFIG_BATCH_SIZE)))

# Select the training device.
# BUG FIX: `torch.cuda.is_available` (without parentheses) is a function
# object, which is always truthy — the script would pick 'cuda:0' even on
# CPU-only machines and then crash at `net.to(device)`. Call the function.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = LeNet()
print(f'Training Device: {device}')
# Move the network onto the training device.
net.to(device)
# Use cross-entropy loss (suitable for multi-class classification).
loss_function = nn.CrossEntropyLoss()
# Use the Adam optimizer with a learning rate of 0.001.
optimizer = optim.Adam(net.parameters(), lr=0.001)

batches = len(trainset) // Config.CONFIG_BATCH_SIZE  # total number of batches per epoch
epoches = 10  # number of epochs


# 一个epoch的训练函数
def train(trainloader):
    """Train the network for one epoch.

    Args:
        trainloader: iterable yielding (inputs, labels) batches.

    Returns:
        The mean per-batch training loss over the whole epoch.
    """
    running_loss = 0.0  # cumulative loss over the epoch
    # BUG FIX: the original condition used `% (batches / 5)` — a float
    # modulus — so the progress print almost never fired unless `batches`
    # was an exact multiple of 5. Use integer division, clamped to >= 1 so
    # small datasets still log.
    log_interval = max(1, batches // 5)
    for step, data in enumerate(trainloader, 0):
        # Move the batch from the loader to the training device.
        inputs, labels = data[0].to(device), data[1].to(device)
        # Clear gradients; otherwise they accumulate across steps.
        optimizer.zero_grad()
        # Forward pass.
        outputs = net(inputs)
        # Compute the loss.
        loss = loss_function(outputs, labels)
        # Backward pass: compute gradients.
        loss.backward()
        # Optimizer step: update parameters.
        optimizer.step()
        # Accumulate the scalar loss.
        running_loss += loss.item()
        # Print the running average loss every `log_interval` steps.
        if (step + 1) % log_interval == 0:
            # BUG FIX: dividing by `step` raised ZeroDivisionError when the
            # print fired on the first step, and was off by one as an
            # average; divide by the number of batches seen so far.
            print(f'[{step + 1}/{batches}] running_loss: {(running_loss / (step + 1)):5.5}')
    # Return this epoch's mean per-batch training loss.
    return running_loss / batches


# 测试函数
def test(testloader):
    """Evaluate the network on the test set.

    Args:
        testloader: iterable yielding (inputs, labels) batches.

    Returns:
        The mean per-batch accuracy over the test set (0.0 for an empty loader).
    """
    test_acc = 0.0
    cnt = 0
    # Disable autograd once for the whole evaluation pass (the original
    # re-entered the context manager on every batch).
    with torch.no_grad():
        for data in testloader:
            inputs, labels = data[0].to(device), data[1].to(device)
            outputs = net(inputs)
            # Predicted class = argmax over the logits of each sample.
            pred = torch.max(outputs, dim=1)[1]
            # Per-batch accuracy.
            acc = (pred == labels).sum().item() / labels.size(0)
            test_acc += acc
            cnt += 1
    # Robustness: avoid ZeroDivisionError on an empty test loader.
    return test_acc / cnt if cnt else 0.0


printer = DataPrinter()
# Main loop: train one epoch, evaluate on the test set, record and report.
for epoch_idx in range(1, epoches + 1):
    print(f'Epoch[{epoch_idx}/{epoches}]')
    epoch_loss = train(trainloader)
    epoch_acc = test(testloader)
    printer(epoch_loss, epoch_acc)
    print(f'Train loss: {epoch_loss:5.5}, Test accuracy: {epoch_acc:5.3%}')

# Show and persist the loss/accuracy curves, then save the trained weights.
printer.show()
printer.save(Config.CONFIG_SAVE_FIG_PATH)
torch.save(net.state_dict(), Config.CONFIG_SAVE_PATH)
