from utils.dataloader import ViTDataSet
from utils.vit_utils import get_classes, get_transform
from torch.utils.data import DataLoader
from vit_pytorch.vit import ViTBody
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch import optim
from vit_pytorch.vit_training import get_lr_scheduler, set_optimizer_lr
from utils.fit import fit_one_epoch
import torch
import numpy as np
from vit_pytorch.smooth_label import LabelSmoothing


if __name__ == '__main__':
    # ----------------------------------------------------#
    #   获得图片路径和标签：gen_img_url生成标签数据
    # ----------------------------------------------------#
    train_annotation_path = 'annotation/train.txt'
    val_annotation_path = 'annotation/val.txt'
    # ----------------------------------------------------#
    #   分类类别数据
    # ----------------------------------------------------#
    classes_path = "model_data/classes.txt"
    # ------------------------------------------------------------------#
    #   输入图片大小W H
    # ------------------------------------------------------------------#
    input_shape = 256
    # ------------------------------------------------------------------#
    #   patch 分块大小
    # ------------------------------------------------------------------#
    patch_size = 16
    # ------------------------------------------------------------------#
    #   是否使用GPU
    # ------------------------------------------------------------------#
    Cuda = True
    # ------------------------------------------------------------------#
    #   优化器配置
    # ------------------------------------------------------------------#
    optimizer_type = "sgd"
    momentum = 0.937
    weight_decay = 5e-4
    # ------------------------------------------------------------------#
    #   Init_lr         模型的最大学习率
    #   Min_lr          模型的最小学习率，默认为最大学习率的0.01
    # ------------------------------------------------------------------#
    Init_lr = 1e-2
    Min_lr = Init_lr * 0.01
    # ------------------------------------------------------------------#
    #   batch_size   根据资源配置调整
    # ------------------------------------------------------------------#
    batch_size = 8
    # ------------------------------------------------------------------#
    #   lr_decay_type   使用到的学习率下降方式，可选的有step、cos
    # ------------------------------------------------------------------#
    lr_decay_type = "cos"
    # ------------------------------------------------------------------#
    #   Init_epoch   初始次世代
    #   Total_epoch  最长次世代数
    # ------------------------------------------------------------------#
    Init_epoch = 0
    Total_epoch = 300
    # ------------------------------------------------------------------#
    #   num_workers   多线程加载数据集
    #   shuffle       是否打乱顺序
    # ------------------------------------------------------------------#
    num_workers = 4
    shuffle = True

    # ------------------------------------------------------------------#
    #   model_path   设置预训练权重路径
    # ------------------------------------------------------------------#
    model_path = "pretrain_weights/imagenet21k+imagenet2012_ViT-L_32.npz"
    # ------------------------------------------------------------------#
    #   平滑标签   设置预训练权重路径
    # ------------------------------------------------------------------#
    label_smoothing = 0.1
    # ------------------------------------------------------------------#
    #   权重保存路径   设置预训练权重路径
    # ------------------------------------------------------------------#
    save_dir = "weights"

    # 读取训练集的标签数据
    with open(train_annotation_path) as f:
        train_lines = f.read().splitlines()
    # 读取验证集的标签数据
    with open(val_annotation_path) as f:
        val_lines = f.read().splitlines()
    # train_lines 获取图片的路径 [img_url1, img_url2...img_urls]
    num_train = len(train_lines)
    # 获取验证集的数据 [img_url1, img_url2...img_urls]
    num_val = len(val_lines)

    # ------------------------------------------------------------------#
    #   class_names   类别[class1,class2,...,classN]
    #   num_classes   类别总数len(class_names)
    # ------------------------------------------------------------------#
    class_names, num_classes = get_classes(classes_path)

    train_dataset = ViTDataSet(train_lines, input_shape, class_names, get_transform("train"))
    val_dataset = ViTDataSet(val_lines, input_shape, class_names, get_transform("val"))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    local_rank = 0
    # 加载模型
    model = ViTBody(image_size=input_shape,
                    patch_size=patch_size,
                    num_classes=num_classes,
                    mlp_dim=3072,
                    dim=768,
                    dropout=0.1,
                    emb_dropout=0.1)

    # 使用预训练参数
    if model_path != "":
        # ------------------------------------------------------#
        #   根据预训练权重的Key和模型的Key进行加载
        # ------------------------------------------------------#
        model_dict = model.state_dict()
        pretrained_dict = torch.load(model_path, map_location=device)
        load_key, no_load_key, temp_dict = [], [], {}
        pretrain_state_dict = pretrained_dict[""]
        for k, v in pretrained_dict.items():
            if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
                temp_dict[k] = v
                load_key.append(k)
            else:
                no_load_key.append(k)
        model_dict.update(temp_dict)
        model.load_state_dict(model_dict)
        # ------------------------------------------------------#
        #   显示没有匹配上的Key
        # ------------------------------------------------------#
        if local_rank == 0:
            print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key))
            print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key))
            print("\n\033[1;33;44m温馨提示，head部分没有载入是正常现象，Backbone部分没有载入是错误的。\033[0m")
    else:
        # TODO 初始化权重
        # weightInit(model)
        pass

    model_train = model.train()
    if Cuda:
        model_train = nn.DataParallel(model)
        cudnn.benchmark = True
        model_train = model_train.cuda()
    # ------------------------------------------------------#
    #   分类使用交叉熵损失函数
    # ------------------------------------------------------#
    loss = nn.CrossEntropyLoss
    # ------------------------------------------------------#
    #   是否使用平滑标签
    # ------------------------------------------------------#
    if label_smoothing > 0.0:
        loss = lambda: LabelSmoothing(label_smoothing)
    criterion = loss()
    # -------------------------------------------------------------------#
    #   判断当前batch_size，自适应调整学习率
    # -------------------------------------------------------------------#
    for epoch in range(Init_epoch, Total_epoch):
        nbs = 64
        lr_limit_max = 1e-3 if optimizer_type == 'adam' else 5e-2
        lr_limit_min = 3e-4 if optimizer_type == 'adam' else 5e-4
        # 调整学习率
        Init_lr_fit = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
        Min_lr_fit = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)

        # ---------------------------------------#
        #   根据optimizer_type选择优化器
        # ---------------------------------------#
        pg0, pg1, pg2 = [], [], []
        for k, v in model.named_modules():
            if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
                pg2.append(v.bias)
            if isinstance(v, nn.BatchNorm2d) or "bn" in k:
                pg0.append(v.weight)
            elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
                pg1.append(v.weight)
        optimizer = {
            # TODO:测试Adai优化器
            'adam': optim.Adam(pg0, Init_lr_fit, betas=(momentum, 0.999)),
            'sgd': optim.SGD(pg0, Init_lr_fit, momentum=momentum, nesterov=True)
        }[optimizer_type]
        optimizer.add_param_group({"params": pg1, "weight_decay": weight_decay})
        optimizer.add_param_group({"params": pg2})

        # ---------------------------------------#
        #   获得学习率下降的公式
        # ---------------------------------------#
        lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, epoch)

        # TODO 暂不处理：如果有提取特征的主干网络
        # for param in model.backbone.parameters():
        #     param.requires_grad = True

        epoch_step = num_train // batch_size
        epoch_step_val = num_val // batch_size

        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError("数据集过小，无法继续进行训练，请扩充数据集。")

        gen = DataLoader(train_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                         pin_memory=True, drop_last=True)
        gen_val = DataLoader(val_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                             pin_memory=True, drop_last=True)

        # 每训练一个epoch 通过lr_scheduler_func方法更新学习率
        set_optimizer_lr(optimizer, lr_scheduler_func, epoch)

        # 真正训练的地方：训练模型+每个epoch验证模型
        fit_one_epoch(model_train, model, criterion, optimizer, epoch, epoch_step, epoch_step_val, gen,
                                       gen_val, Total_epoch, Cuda, local_rank, save_dir)
