import torch
import torch.nn as nn
import torchvision
from pre_resnet import pytorch_resnet18
from load_cifar10 import train_data_loader, test_data_loader
import os
import time
import tensorboardX

# Select GPU if available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")

# Total number of training epochs.
epoch_num = 200

# The network: ResNet-18 from the project-local pre_resnet module.
net = pytorch_resnet18().to(device)

# Cross-entropy loss for multi-class classification.
loss_func = nn.CrossEntropyLoss().to(device)

# Initial learning rate.
lr = 0.01

# SGD optimizer (plain; the momentum/weight-decay variant is kept below
# as an alternative).
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
# optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)

# LR schedule: multiply the learning rate by 0.9 after EVERY epoch
# (step_size=1; the original comment claimed every 5 epochs, which did
# not match the code).
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)

# NOTE(review): "bitch" is a typo for "batch"; the names are kept because
# the training/eval code below references them.
train_bitch_size = train_data_loader.batch_size
test_bitch_size = test_data_loader.batch_size

# TensorBoard log directory. Raw string avoids invalid escape sequences
# such as "\p" in the Windows path (deprecated in normal string literals);
# the resulting path value is unchanged.
tensorboardXpath = r"E:\pythonProject\project1\log"

# makedirs(..., exist_ok=True) also creates missing parent directories and
# does not raise if the directory already exists (os.mkdir would do neither).
os.makedirs(tensorboardXpath, exist_ok=True)
# writer = tensorboardX.SummaryWriter(tensorboardXpath)

if __name__ == '__main__':
    # Train for epoch_num epochs; after each epoch, evaluate on the test
    # set, report loss/accuracy and timing, and checkpoint the model.
    for epoch in range(epoch_num):
        start_time = time.time()  # wall-clock start of this epoch

        # ===================== Training =====================
        # Set training mode once per epoch (enables BatchNorm running-stat
        # updates and Dropout); no need to re-call it per mini-batch.
        net.train()
        # data -> (inputs, labels) mini-batch from the train loader
        for i, data in enumerate(train_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = net(inputs)
            loss = loss_func(outputs, labels)

            # Standard step: clear stale gradients, backprop, update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Advance the LR schedule once per epoch (after the optimizer steps).
        scheduler.step()

        # ===================== Evaluation =====================
        # Eval mode: BatchNorm uses running stats, Dropout is disabled.
        net.eval()
        loss_sum = 0.0
        correct_sum = 0
        sample_count = 0  # actual number of test samples seen
        # no_grad: skip autograd graph construction during evaluation —
        # the original built graphs here for no reason, wasting memory.
        with torch.no_grad():
            for i, data in enumerate(test_data_loader):
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)

                outputs = net(inputs)
                loss = loss_func(outputs, labels)
                # Predicted class = argmax over the class dimension.
                _, pred = torch.max(outputs.data, dim=1)
                correct = pred.eq(labels.data).cpu().sum()

                loss_sum = loss_sum + loss.item()
                correct_sum = correct_sum + correct.item()
                # Count real samples: the last batch may be smaller than
                # batch_size, so dividing by batch_size * num_batches
                # would understate the accuracy.
                sample_count = sample_count + labels.size(0)

        print("测试集的 epoch为：", epoch, " loss 为：", loss_sum * 1.0 / len(test_data_loader),
              "准确率为",
              100.0 * correct_sum / sample_count)

        end_time = time.time()  # wall-clock end of this epoch
        elapsed_time = end_time - start_time
        print("当前epoch为：", epoch_num, "/", epoch, "GPU Elapsed time: ", elapsed_time, " seconds.")

        # Checkpoint after every epoch (same file, overwritten each time).
        # Raw strings keep the Windows paths byte-identical while avoiding
        # deprecated escape sequences; makedirs handles missing parents.
        os.makedirs(r"E:\pythonProject\project1\models", exist_ok=True)
        torch.save(net.state_dict(), r"E:\pythonProject\project1\models\model_pytorch_resnet")
