"""
在训练集上训练，并且根据验证集来选择模型

在训练集上要反馈loss/acc 来反映训练的层度
"""
import time
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
from models import vgg, resnet, AlexNet
from data import dataset
from utils.recordInfo import Record2Json
from utils.getModels import getNetwork, WarmUpLR
from utils.getModels import get_model_params


def train_singleEpoch(net, train_iter, loss_function, optimizer, epoch, warmup_scheduler=None):
    """Train ``net`` for one epoch and report loss/accuracy on the training set.

    Args:
        net: the model; assumed already on CUDA (inputs are moved here too).
        train_iter: DataLoader over the training set.
        loss_function: criterion, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer updating ``net``'s parameters.
        epoch: 1-based epoch index; warm-up only runs while ``epoch <= 1``.
        warmup_scheduler: optional per-batch LR scheduler for the first epoch.

    Returns:
        dict with keys 'Train loss', 'Accuracy', 'lr', 'time consumed'.
    """
    start_time = time.time()

    net.train()  # enable dropout and batch-normalization training behavior

    train_loss = 0.0
    correct = 0

    for batch_idx, (inputs, labels) in enumerate(train_iter):
        inputs = inputs.cuda()
        labels = labels.cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = loss_function(outputs, labels)
        # BUGFIX: accumulate the Python float, not the tensor.  Summing the
        # loss tensor keeps every batch's autograd graph alive for the whole
        # epoch, leaking GPU memory.
        train_loss += loss.item()

        _, preds = outputs.max(dim=1)
        correct += preds.eq(labels).sum().item()

        loss.backward()
        optimizer.step()

        # Warm-up adjusts the LR per batch, but only within the first epoch.
        if epoch <= 1 and warmup_scheduler is not None:
            warmup_scheduler.step()

    finish_time = time.time()
    print('train loss:{:.4f}, acc:{:.4f}, lr:{:.4f}, time consumed:{:.2f}'.format(
        train_loss / len(train_iter.dataset),
        correct / len(train_iter.dataset),
        optimizer.param_groups[0]['lr'],
        finish_time - start_time
    ))

    return {'Train loss': train_loss / len(train_iter.dataset),
            'Accuracy': correct / len(train_iter.dataset),
            'lr': optimizer.param_groups[0]['lr'],
            'time consumed': finish_time - start_time
            }


"""
@torch.no_grad() 等价于 with torch.no_grad():
一个是装饰器（写在测试函数上） 一个是上下文管理器 （写在测试函数内）
执行计算，但该计算不会在反向传播中被记录
减少显存使用 减少计算量
"""
@torch.no_grad()  # essential: no gradient bookkeeping during evaluation
def eval_validation(net, val_iter, loss_function):
    """Evaluate ``net`` on the validation loader.

    Prints and returns the average loss, accuracy and elapsed time.
    The network is switched back to training mode before returning.

    Returns:
        dict with keys 'avg_loss', 'Accuracy', 'time consumed'.
    """
    tic = time.time()
    net.eval()  # disable dropout; batch norm uses running statistics

    total_loss = 0.0
    num_correct = 0.0

    for inputs, labels in val_iter:
        inputs, labels = inputs.cuda(), labels.cuda()

        logits = net(inputs)
        total_loss += loss_function(logits, labels).item()

        predictions = logits.max(dim=1)[1]
        num_correct += predictions.eq(labels).sum()

    net.train()  # restore training mode for the caller
    toc = time.time()

    # len(val_iter) would be the number of batches; the dataset size is
    # len(val_iter.dataset).
    sample_count = len(val_iter.dataset)
    print('eval on validation: avg_loss:{:.4f}, Accuracy:{:.4f}, time consumed:{:.2f}'.format(
        total_loss / sample_count,
        num_correct.float() / sample_count,
        toc - tic
    ))

    return {'avg_loss': total_loss / sample_count,
            'Accuracy': (num_correct.float() / sample_count).item(),
            'time consumed': toc - tic
            }


def train(netName, epoch_num=200, lr=0.1, batchSize=128):
    """Train ``netName`` on CIFAR-100, evaluating each epoch and logging to JSON.

    Args:
        netName: model identifier understood by ``getNetwork``.
        epoch_num: number of epochs to run.
        lr: initial learning rate for SGD.
        batchSize: mini-batch size for both dataloaders.
    """
    net = getNetwork(netName)
    net.cuda()  # (pytorch_cifar100 moves the net to CUDA at construction; we do it here)

    cifar100_train = dataset.cifar100_training_dataloader(batch_size=batchSize,
                                                          num_workers=4,
                                                          shuffle=True)
    # NOTE: the test set temporarily doubles as the validation set.
    cifar100_val = cifar100_test = dataset.cifar100_test_dataloader(batch_size=batchSize,
                                                                    num_workers=4,
                                                                    shuffle=True)

    loss_function = nn.CrossEntropyLoss()

    # The optimizer's first argument supplies the network parameters; after
    # loss.backward() each parameter holds its gradient, and the optimizer
    # updates the parameter values from those gradients.
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)

    # LR schedule following the ResNet paper: "The learning rate starts from
    # 0.1 and is divided by 10 when the error plateaus."  Here validation
    # accuracy is monitored ('max' mode); after `patience` epochs without an
    # absolute improvement of `threshold`, the LR is multiplied by `factor`.
    # (The deprecated `verbose` kwarg is omitted; its default is off anyway.)
    train_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=optimizer,
        mode='max',
        factor=0.3,
        patience=10,
        threshold=0.01,
        threshold_mode='abs',
        cooldown=0,
        min_lr=0.0001  # LR floor, usually chosen in [1e-6, 1e-4]
    )
    # The scheduler only takes effect where scheduler.step() is called below.

    record2Json = Record2Json(netName)  # persists per-epoch metrics

    for i in range(1, epoch_num + 1):
        print('\n')
        print('-----------------------epoch:{}-----------------------'.format(i))
        ret_train = train_singleEpoch(net=net,
                                      train_iter=cifar100_train,
                                      loss_function=loss_function,
                                      optimizer=optimizer,
                                      epoch=i,
                                      warmup_scheduler=None)

        ret_valid = eval_validation(net=net,
                                    val_iter=cifar100_val,
                                    loss_function=loss_function)

        # Step the plateau scheduler on the monitored metric (val accuracy).
        train_scheduler.step(ret_valid['Accuracy'])

        # Record intermediate metrics for later analysis.
        record2Json.recordTrain(ret_train)
        record2Json.recordValid(ret_valid)

    record2Json.write2json()


if __name__ == '__main__':
    # Command-line entry point: parse hyper-parameters and kick off training.
    parser = argparse.ArgumentParser(description="train neural network in cifar100")
    parser.add_argument('-net', type=str, required=True, help='net type')
    parser.add_argument('--epoch', type=int, default=200, help='the num of epoch')
    parser.add_argument('-b', '--batchsize', type=int, default=128, help='batch size for dataloader')
    parser.add_argument('-lr', type=float, default=0.1, help='initial learning rate')
    args = parser.parse_args()

    train(netName=args.net,
          epoch_num=args.epoch,
          batchSize=args.batchsize,
          lr=args.lr)

"""
resnet34 在该代码下结果
-----------------------epoch:200-----------------------
train loss:0.0005, lr:0.0001, time consumed:40.12
eval on validation: avg_loss:0.0084, Accuracy:0.7463, time consumed:2.20

使用pytorch-cifar100项目上跑的结果
Training Epoch: 200 [50000/50000]       Loss: 0.0059    LR: 0.000800
Test set: Epoch: 200, Average loss: 0.0072, Accuracy: 0.7762, Time consumed:2.20s
"""
