# @Author : wangyuchen
# @Time : 2021-05-13 10:22

import os
import argparse
import time
import torch
import torch.nn as nn
import torch.optim as optim
from utils.base import *
import utils.settings as settings
from eval import load_res, cal_acc
import numpy as np
# from torchsummary import summary


def train(epoch):
    """Run one training epoch over ``training_loader``, updating ``net`` in place.

    Relies on the module-level globals ``net``, ``training_loader``,
    ``optimizer``, ``loss_function`` and ``args``.

    Args:
        epoch: 1-based epoch index, used only for progress logging.

    Returns:
        float: mean batch loss over the epoch (0.0 for an empty loader).
    """
    start = time.time()
    net.train()
    epoch_loss = 0.0
    num_batches = 0
    for batch_index, (images, labels) in enumerate(training_loader):
        # The dataloader may yield labels as strings; coerce them to an int64
        # tensor unconditionally so CPU runs do not crash (previously this
        # conversion only happened on the GPU path).
        labels = torch.from_numpy(np.array(list(map(int, labels)), dtype=np.int64))
        # Move tensors to the GPU when requested.
        if args.gpu:
            labels = labels.cuda()
            images = images.cuda()

        # Reset gradients accumulated from the previous step.
        optimizer.zero_grad()
        # Forward pass.
        outputs = net(images)
        # Compute the loss.
        loss = loss_function(outputs, labels)
        # Backward pass.
        loss.backward()
        # Parameter update.
        optimizer.step()

        print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'.format(
            loss.item(),
            optimizer.param_groups[0]['lr'],
            epoch=epoch,
            trained_samples=batch_index * args.b + len(images),
            total_samples=len(training_loader.dataset)
        ))
        # Accumulate (the original overwrote the value each batch).
        epoch_loss += loss.item()
        num_batches += 1
    finish = time.time()
    print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))
    return epoch_loss / num_batches if num_batches else 0.0


# @torch.no_grad()中的数据不需要计算梯度，也不会进行反向传播
# Inside @torch.no_grad() no gradients are tracked and no backward pass occurs.
@torch.no_grad()
def eval_training(epoch, save_path):
    """Evaluate ``net`` on ``validating_loader`` and dump per-image predictions.

    Relies on the module-level globals ``net``, ``validating_loader`` and
    ``args``.

    Args:
        epoch: epoch index, substituted into ``save_path`` via ``str.format``.
        save_path: format string (may contain ``{epoch}``) naming the file
            that receives one "img_name predicted_class" line per sample.

    Returns:
        Validation accuracy in [0, 1] (a 0-d tensor when the loader is
        non-empty), computed against the actual dataset size rather than a
        hard-coded 10000.
    """
    start = time.time()
    net.eval()

    correct = 0.0
    lines = []

    for idx, (img_names, images, labels) in enumerate(validating_loader):
        if args.gpu:
            images = images.cuda()
            labels = labels.cuda()

        outputs = net(images)
        # torch.max returns (values, indices); dim=1 selects the per-row
        # (per-sample) argmax, i.e. the predicted class for each sample.
        _, preds = torch.max(outputs.data, 1)
        correct += preds.eq(labels).sum()
        for i in range(len(preds)):
            lines.append(img_names[i] + ' ' + str(preds[i].item()) + '\n')

    finish = time.time()
    if args.gpu:
        print('Use GPU')
    print('Writing Predictions.....')
    with open(save_path.format(epoch=epoch), 'w+') as f:
        f.writelines(lines)
    # Normalize by the real dataset size; the original printed the raw
    # correct COUNT under an "Accuracy" label and divided by a fixed 10000.
    total = len(validating_loader.dataset)
    accuracy = correct / total if total else 0.0
    print('Validation set: Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        float(accuracy), finish - start))
    return accuracy


if __name__ == '__main__':
    # Build the command-line parser.
    # add_argument() registers each option; parse_args() converts the
    # command-line strings into attributes on the returned namespace.
    parser = argparse.ArgumentParser()
    parser.add_argument('-net', type=str, required=True, help='net type')
    parser.add_argument('-gpu', action='store_true', default=False, help='use gpu or not')
    parser.add_argument('-b', type=int, default=16, help='batch size for dataloader')
    parser.add_argument('-warm', type=int, default=1, help='warm up training phase')
    parser.add_argument('-lr', type=float, default=1e-5, help='initial learning rate')
    args = parser.parse_args()

    net = get_network(args)

    # data preprocessing:
    training_loader = get_training_dataloader(
        './train',
        settings.TRAIN_MEAN,
        settings.TRAIN_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    validating_loader = get_validating_dataloader(
        './val',
        settings.ANNO_PATH,
        settings.VAL_MEAN,
        settings.VAL_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    # Cross-entropy loss with a plain Adam optimizer.
    loss_function = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
    iter_per_epoch = len(training_loader)

    checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, settings.TIME_NOW)

    # create checkpoint folder to save model
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{type}-{acc}.pth')

    if not os.path.exists(args.net):
        os.makedirs(args.net)
    save_path = os.path.join(args.net, settings.PRED_PATH)

    best_acc = 0.0
    best_epoch = 0
    history = []
    # range(1, EPOCH + 1): the original range(1, settings.EPOCH) silently
    # dropped the last epoch.
    for epoch in range(1, settings.EPOCH + 1):
        train(epoch)
        acc = float(eval_training(epoch, save_path))  # plain float, not a tensor
        history.append(acc)
        if best_acc < acc:
            # Format the accuracy explicitly so the checkpoint filename is
            # e.g. "resnet-3-best-0.8321.pth" rather than containing a
            # tensor repr.
            torch.save(net.state_dict(),
                       checkpoint_path.format(net=args.net, epoch=epoch,
                                              type='best', acc='{:.4f}'.format(acc)))
            best_acc = acc
            best_epoch = epoch
        # Always report progress (the original `continue` skipped these
        # prints on exactly the epochs where a new best was found).
        print(best_acc)
        print(best_epoch)
    print(best_acc)
    print(best_epoch)
    print(history)
    # writelines() requires strings; the original passed floats/tensors and
    # raised TypeError.
    with open('history.txt', 'w') as fp:
        fp.writelines('{}\n'.format(a) for a in history)
