"""
训练ResNet网络
"""
from torchvision import transforms
from torch import nn, cuda, optim, no_grad, save, randn
from torch.utils.data import DataLoader
import os, sys
from tqdm import tqdm
# Project-local modules
from model.ResNet import resnet18, resnet34, resnet50, resnet101, resnet152
from model.ResNext import resnext50
from model.MobileNetv1 import mobilenetv1
from model.MobileNetv2 import mobilenetv2
from model.MobileNetv3 import mobilenetv3
from model.VGG import vgg19, vgg16
from utils.Dataset import CustomDataset
from utils.utils import load_dataset_message, get_model_flops_args
from torchvision.datasets import CIFAR10

# CLI argument parsing
from argparse import ArgumentParser

# Training visualization (TensorBoard helpers)
from utils.log import init_tensorboard, visual_scalars_tensorboard_script, visual_model_tensorboard_script, close_tensorboard

# Train / validate the selected model on the train and validation splits
def train(args):
    """Train a classification model selected by ``args.model``.

    Loads the train/val splits from ``./dataset``, builds the requested
    network, trains it with SGD plus a linear learning-rate schedule, logs
    metrics to TensorBoard, and saves a checkpoint whenever the validation
    accuracy reaches a new best.

    Args:
        args: parsed CLI namespace with ``epochs``, ``model``, ``classes``,
            ``lr``, ``size``, ``batch`` and ``name`` attributes (see the
            argparse setup at the bottom of this file).

    Raises:
        ValueError: if ``args.model`` does not name a known architecture.
    """
    # ------------------------------------------------------------#
    # base config
    # ------------------------------------------------------------#
    epochs = args.epochs
    best_accuracy = 0.0
    # ------------------------------------------------------------#
    # GPU available?
    # ------------------------------------------------------------#
    use_cuda = cuda.is_available()

    # ------------------------------------------------------------#
    # data augmentation / preprocessing
    # ------------------------------------------------------------#
    data_transform = {
        "train": transforms.Compose([
            transforms.Resize((args.size, args.size)),
            transforms.ToTensor(),
        ]),
        "val": transforms.Compose([
            transforms.Resize((args.size, args.size)),
            transforms.ToTensor(),
        ])
    }

    # ------------------------------------------------------------#
    # load dataset
    # ------------------------------------------------------------#
    # image path
    image_path = "./dataset"
    train_set = CustomDataset(*load_dataset_message(os.path.join(image_path, "train")), handle_methods=data_transform["train"])
    validate_set = CustomDataset(*load_dataset_message(os.path.join(image_path, "val")), handle_methods=data_transform["val"])
    # train_set = CIFAR10(image_path, train=True, transform=data_transform["train"], download=True)
    # validate_set = CIFAR10(image_path, train=False, transform=data_transform["val"], download=True)
    val_set_len = len(validate_set)

    # ------------------------------------------------------------#
    # build data loaders
    # ------------------------------------------------------------#
    train_loader = DataLoader(train_set, batch_size=args.batch, shuffle=True, collate_fn=CustomDataset.collate_func)
    # Validation needs no shuffling: accuracy is aggregated over the whole
    # split, so keep iteration deterministic.
    val_loader = DataLoader(validate_set, batch_size=args.batch, shuffle=False, collate_fn=CustomDataset.collate_func)

    # ------------------------------------------------------------#
    # init tensorboard
    # ------------------------------------------------------------#
    writer = init_tensorboard(os.path.join(os.getcwd(), "runs", args.name))

    # ------------------------------------------------------------#
    # init model
    # ------------------------------------------------------------#
    # Explicit registry instead of eval(): eval() on a CLI string is unsafe
    # and produces an opaque NameError for typos.
    model_zoo = {
        "resnet18": resnet18, "resnet34": resnet34, "resnet50": resnet50,
        "resnet101": resnet101, "resnet152": resnet152,
        "resnext50": resnext50,
        "mobilenetv1": mobilenetv1, "mobilenetv2": mobilenetv2, "mobilenetv3": mobilenetv3,
        "vgg16": vgg16, "vgg19": vgg19,
    }
    if args.model not in model_zoo:
        raise ValueError("unknown model '{}', expected one of {}".format(args.model, sorted(model_zoo)))
    model = model_zoo[args.model](args.classes)
    # get_model_flops_args(model, (3, 224, 224))
    if use_cuda:
        model = nn.DataParallel(model.cuda())
    # ------------------------------------------------------------#
    # loss function
    # ------------------------------------------------------------#
    loss_function = nn.CrossEntropyLoss()
    # ------------------------------------------------------------#
    # optimizer
    # ------------------------------------------------------------#
    lr = args.lr
    # Split learnable parameters into three groups so that only plain weights
    # receive weight decay; BN gammas and all biases are decay-free.
    weight, bias, bn_weight = [], [], []
    for v in model.modules():
        # every bias goes into the no-decay bias group
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            bias.append(v.bias)
        # BatchNorm gamma: no decay
        if isinstance(v, nn.BatchNorm2d):
            bn_weight.append(v.weight)
        # ordinary weights: decayed
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            weight.append(v.weight)

    optimizer = optim.SGD(bn_weight, lr=lr, momentum=0.937, nesterov=True)
    # register the remaining parameter groups
    optimizer.add_param_group({"params": weight, "weight_decay": 5E-4})
    optimizer.add_param_group({"params": bias})
    # Linear LR multiplier: lf(0) == 1.0, lf(epochs) == lr, so the effective
    # learning rate decays linearly from lr down to lr * lr.
    lf = lambda x: (1 - x / epochs) * (1.0 - lr) + lr  # linear
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
    # ------------------------------------------------------------#
    # start train
    # ------------------------------------------------------------#
    for epoch in range(epochs):
        model.train()
        total_loss = 0.0
        char = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(char):
            images, labels = data
            if use_cuda:
                images, labels = images.cuda(), labels.cuda()
            optimizer.zero_grad()
            x = model(images)
            loss = loss_function(x, labels)
            loss.backward()
            # visual_model_tensorboard_script(writer, model, images)
            optimizer.step()
            # accumulate loss for the progress bar / TensorBoard
            total_loss += loss.item()
            char.set_description(
                "Processing epoch[{}/{}], "
                "Learning-Ratio->{}, "
                "Training-Loss->{}".format(epoch, epochs, round(optimizer.param_groups[0]["lr"], 6), round(total_loss, 2)))

        # per-epoch validation (no gradients needed)
        with no_grad():
            model.eval()
            right = 0
            val_total_loss = 0.0
            val_char = tqdm(val_loader, file=sys.stdout)
            for step, data in enumerate(val_char):
                images, labels = data
                if use_cuda:
                    images, labels = images.cuda(), labels.cuda()
                forward_ret = model(images)
                # validation loss
                val_total_loss += loss_function(forward_ret, labels).item()
                # count correct top-1 predictions
                right += (forward_ret.argmax(1) == labels).sum().item()
                val_char.set_description(
                    "Processing epoch[{}/{}], "
                    "Learning-Ratio->{}, "
                    "Validation-Loss->{}".format(epoch, epochs, round(optimizer.param_groups[0]["lr"], 6),
                                               round(val_total_loss, 2)))
            # overall accuracy in percent
            val_accuracy = right / val_set_len * 100
            # write scalars to tensorboard; log the *current* scheduled LR,
            # not the initial constant.
            visual_scalars_tensorboard_script(writer, "Training Loss", total_loss, epoch)
            visual_scalars_tensorboard_script(writer, "Validation Loss", val_total_loss, epoch)
            visual_scalars_tensorboard_script(writer, "Accuracy", val_accuracy, epoch)
            visual_scalars_tensorboard_script(writer, "Learning Rate", optimizer.param_groups[0]["lr"], epoch)
            val_char.set_description(
                "Processing epochs[{}/{}], "
                "Validation-Loss->{}, "
                "Validation-Accuracy->{}%".format(epoch, epochs, round(val_total_loss, 2), round(val_accuracy, 2)))
            # checkpoint on new best accuracy
            if val_accuracy >= best_accuracy:
                best_accuracy = val_accuracy
                weights_pth = os.path.join(os.getcwd(), "runs", args.name, "weights")
                if not os.path.exists(weights_pth):
                    os.makedirs(weights_pth)
                save(model.state_dict(), os.path.join(weights_pth, "epoch_{}.loss_{}.accuracy_{}.pth".format(epoch, total_loss, val_accuracy)))
        print("Loss-Value:{}, Accuracy-Ratio:{}%, Learning-Ratio:{}".format(round(total_loss, 2), round(val_accuracy, 2), round(optimizer.param_groups[0]["lr"], 6)))
        # Passing an epoch to step() is deprecated and, with the loop starting
        # at 0, repeated lf(0) after the first epoch; a bare step() advances
        # the schedule correctly.
        scheduler.step()
    close_tensorboard(writer)

if __name__ =="__main__":
    # 初始化参数解析器
    parser = ArgumentParser(description="training config")

    parser.add_argument("--epochs", type=int, default=100, help="training epoch")
    parser.add_argument("--model", type=str, default="resnet18", help="training model")
    parser.add_argument("--classes", type=int, help="classes number")
    parser.add_argument("--lr", type=float, default=0.01, help="classes number")
    parser.add_argument("--size", type=int, default=1140, help="image size")
    parser.add_argument("--batch", type=int, default=16, help="image batch size")
    parser.add_argument("--name", type=str, default="exp", help="save path")

    # 解析参数
    args = parser.parse_args()
    train(args)