import os

import torch
import torch.nn as nn
import torchvision
import tensorboardX

from m03_vggnet import VGGNet
from m02_load_cifar10 import train_data_loader, test_data_loader

# Device to train on: use the GPU when CUDA is available, else fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# epoch_num = 200
epoch_num = 20    # number of training epochs (reduced from 200 for quicker runs)
lr = 0.01         # initial learning rate; decayed each epoch by the scheduler in main()
batch_size = 128  # nominal mini-batch size produced by the data loaders


def main():
    """Train VGGNet on CIFAR-10, logging metrics and images to TensorBoard.

    Side effects:
        - creates ./log (TensorBoard event files) and ./models directories
        - saves a checkpoint models/<epoch>.pth after every epoch
        - prints per-step train/test loss and accuracy to stdout
    """
    # Directory for TensorBoard event files.
    os.makedirs("log", exist_ok=True)
    writer = tensorboardX.SummaryWriter("log")

    net = VGGNet().to(device)

    loss_func = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    # Alternative: SGD with momentum and L2 weight decay.
    # optimizer = torch.optim.SGD(
    #     net.parameters(), lr=lr,
    #     momentum=0.9,
    #     weight_decay=5e-4,
    # )

    # LR decay (large early, small late): multiply lr by gamma after each epoch.
    scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer,
        step_size=1,  # decay after every epoch
        gamma=0.9
    )

    step_n = 0  # global step counter across all training batches
    for epoch in range(epoch_num):
        print(" epoch is ", epoch + 1)
        # BatchNorm/Dropout behave differently in train vs. eval mode:
        # train() updates BN running stats and enables dropout; eval() freezes
        # BN stats and disables dropout. Hence net.train() here, net.eval() below.
        net.train()

        # ==================== train =================
        for i, data in enumerate(train_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = net(inputs)
            loss = loss_func(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Predicted class = argmax over the 10 class scores.
            _, pred = torch.max(outputs.data, dim=1)
            correct = pred.eq(labels.data).cpu().sum()
            # Use the actual batch size — the final batch may be smaller
            # than the configured batch_size.
            n_batch = labels.size(0)

            print(f"train epoch: {epoch + 1}, step: {i}, loss is: {loss.item()}, "
                  f"mini-batch correct is: {100.0 * correct / n_batch}")

            writer.add_scalar("train loss", loss.item(), global_step=step_n)
            writer.add_scalar(
                "train correct",
                100.0 * correct.item() / n_batch,
                global_step=step_n
            )

            # Log the current input batch as an image grid.
            im = torchvision.utils.make_grid(inputs)
            writer.add_image("train im", im, global_step=step_n)

            step_n += 1

        os.makedirs("models", exist_ok=True)

        # Checkpoint the model weights after every epoch.
        torch.save(net.state_dict(), f"models/{epoch + 1}.pth")

        # Decay the learning rate.
        scheduler.step()

        print(f"train epoch: {epoch + 1},lr is {optimizer.state_dict()['param_groups'][0]['lr']}")

        # ================ test: evaluate on the test set after each epoch ================
        net.eval()
        sum_loss = 0.0
        sum_correct = 0
        sum_samples = 0  # total number of test samples seen (handles a short last batch)
        # No gradients needed for evaluation — saves memory and time.
        with torch.no_grad():
            for i, data in enumerate(test_data_loader):
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)

                outputs = net(inputs)
                loss = loss_func(outputs, labels)

                _, pred = torch.max(outputs.data, dim=1)
                correct = pred.eq(labels.data).cpu().sum()
                n_batch = labels.size(0)

                print(f"test epoch: {epoch + 1}, step: {i}, loss is: {loss.item()}, "
                      f"mini-batch correct is: {100.0 * correct / n_batch}")

                sum_loss += loss.item()
                sum_correct += correct.item()
                sum_samples += n_batch

                im = torchvision.utils.make_grid(inputs)
                # BUG FIX: these images were logged under the "train im" tag with
                # a frozen step counter; log them under their own tag per epoch.
                writer.add_image("test im", im, global_step=epoch + 1)

        test_loss = sum_loss / len(test_data_loader)
        # Accuracy over the actual number of test samples, not
        # len(loader) * batch_size (which over-counts a short final batch).
        test_correct = 100.0 * sum_correct / sum_samples
        print(f"test epoch: {epoch + 1}, avg loss: {test_loss}, avg correct: {test_correct}")

        writer.add_scalar("test loss", test_loss, global_step=epoch + 1)
        writer.add_scalar(
            "test correct",
            test_correct,
            global_step=epoch + 1
        )

    writer.close()


# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()
