# 训练脚本
import os
import sys
import time

import torch
import torch.nn as nn
import torchvision

import tensorboardX

from pre_vggnet import pytorch_vggnet11
from load_cifar10 import train_data_loader, test_data_loader

# Select the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

epoch_num = 1  # number of full passes over the training set
lr = 0.01  # initial learning rate

# Instantiate the VGG-11 model and move its parameters to the chosen device.
net = pytorch_vggnet11().to(device)

# loss: cross-entropy for multi-class classification (expects raw logits).
loss_func = nn.CrossEntropyLoss()

# optimizer
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

# Learning-rate schedule: step decay — every 5 epochs the lr is multiplied by 0.9.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)  # NOTE(review): StepLR is step decay, not exponential decay as the original comment claimed


def train_model():
    """Run the training loop for `epoch_num` epochs and save the model.

    Uses the module-level `net`, `loss_func`, `optimizer`, `scheduler`,
    `device` and `train_data_loader`. Logs per-step loss/accuracy to
    TensorBoard under ../tensorboard_log and saves the trained network
    to ../model/train_pytorch_vggnet11_1.pkl.
    """
    # Create the log directory if needed (idempotent; avoids a TOCTOU
    # exists-then-create race).
    os.makedirs('../tensorboard_log', exist_ok=True)

    writer = tensorboardX.SummaryWriter('../tensorboard_log')
    step_n = 0  # global step counter across all epochs

    for epoch in range(epoch_num):
        net.train()  # enable dropout / batch-norm training behavior

        sum_loss = 0.0
        sum_correct = 0
        sum_samples = 0  # actual number of samples seen this epoch
        for i, data in enumerate(train_data_loader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = net(inputs)
            loss = loss_func(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Mini-batch accuracy. BUG FIX: divide by the real batch size
            # (labels.size(0)), not train_data_loader.batch_size — the final
            # batch may be smaller, which previously understated accuracy.
            _, pred = torch.max(outputs.data, dim=1)
            correct = pred.eq(labels.data).cpu().sum()
            batch_size = labels.size(0)
            print('epoch is ', epoch,
                  'step', i,
                  'loss is:', loss.item(),
                  'mini-batch correct is:', 100.0 * correct / batch_size,
                  'lr is:', optimizer.state_dict()['param_groups'][0]['lr'])
            writer.add_scalar('train loss', loss.item(), global_step=step_n)
            writer.add_scalar('train correct', 100.0 * correct / batch_size, global_step=step_n)
            step_n += 1

            sum_loss += loss.item()
            sum_correct += correct.item()
            sum_samples += batch_size

        print('================================')
        # Epoch averages. BUG FIX: accuracy uses the true sample count
        # instead of len(loader) * batch_size (wrong when the last batch
        # is short). max(..., 1) guards against an empty loader.
        print('epoch is ', epoch,
              'loss is:', sum_loss * 1.0 / max(len(train_data_loader), 1),
              'mini-batch correct is:', 100.0 * sum_correct / max(sum_samples, 1),
              'lr is:', optimizer.state_dict()['param_groups'][0]['lr'])
        print('================================')

        # Advance the learning-rate schedule once per epoch.
        scheduler.step()

    # Save the whole model object (pickled); loading it later requires the
    # pytorch_vggnet11 class definition to be importable.
    os.makedirs('../model', exist_ok=True)
    torch.save(net, '../model/train_pytorch_vggnet11_1.pkl')

    writer.close()


if __name__ == '__main__':
    stdout_backup = sys.stdout

    # BUG FIX: unlike the other output directories, ../txt_log was never
    # created, so open() would raise FileNotFoundError on a fresh checkout.
    os.makedirs('../txt_log', exist_ok=True)

    log_file = open('../txt_log/train_pytorch_vggnet11_1.txt', "w")
    sys.stdout = log_file  # redirect all prints into the log file
    try:
        start = time.time()
        train_model()
        end = time.time()
        print(str((end - start)) + 's')
    finally:
        # Restore stdout and close the log even if training raises,
        # so the console remains usable and the log is flushed.
        log_file.close()
        sys.stdout = stdout_backup
