import ast
import configparser as cp

import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader

from net.facenet.Facenet import Facenet
from net.facenet.Facenet_Training import get_lr_scheduler, set_optimizer_lr, ContrastiveLoss

from utils.dataloader import SiameseNetworkDataset
from utils.utils import getRootPath
from utils.round_train import train_one_epoch


if __name__ == "__main__":
    # Facenet training entry point: reads hyperparameters from config.ini,
    # loads (optional) pretrained weights, builds the siamese-pair DataLoader,
    # and runs the epoch loop with a Contrastive Loss criterion.

    # Project root; all relative paths from the config file are resolved against it.
    rootPath = getRootPath()
    # Read the training configuration (see config.ini for per-key documentation).
    filename = r'../config.ini'
    inifile = cp.ConfigParser()
    inifile.read(filename, 'UTF-8')
    # Typed accessors instead of eval(): eval() on config text is an arbitrary
    # code-execution risk and silently accepts wrong types. getboolean/getint/
    # getfloat validate and convert; behavior is identical for well-formed values.
    Cuda = inifile.getboolean('train', 'Cuda')
    annotation_path = rootPath + inifile.get('train', 'annotation_path')
    # input_shape is a Python literal such as [160, 160, 3]; literal_eval
    # parses it safely without executing code.
    input_shape = ast.literal_eval(inifile.get('train', 'input_shape'))
    backbone = inifile.get('train', 'backbone')
    model_path = rootPath + inifile.get('train', 'model_path')
    pretrained = inifile.getboolean('train', 'pretrained')
    margin = inifile.getfloat('train', 'margin')
    batch_size = inifile.getint('train', 'batch_size')
    Init_Epoch = inifile.getint('train', 'Init_Epoch')
    Epoch = inifile.getint('train', 'Epoch')
    Init_lr = inifile.getfloat('train', 'Init_lr')
    Min_lr = inifile.getfloat('train', 'Min_lr')
    optimizer_type = inifile.get('train', 'optimizer_type')
    momentum = inifile.getfloat('train', 'momentum')
    lr_decay_type = inifile.get('train', 'lr_decay_type')
    weight_decay = inifile.getfloat('train', 'weight_decay')
    save_period = inifile.getint('train', 'save_period')
    save_dir = rootPath + inifile.get('train', 'save_dir')
    num_workers = inifile.getint('train', 'num_workers')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Single-process training: ranks are fixed at 0.
    local_rank = 0
    rank = 0

    # ---------------------------------#
    #   Build the model and load pretrained weights.
    # ---------------------------------#
    model = Facenet(backbone=backbone, pretrained=pretrained)

    # Load pretrained weights, if a checkpoint path is configured.
    if model_path != '':
        if local_rank == 0:
            print('Load weights {}.'.format(model_path))

        # ------------------------------------------------------#
        #   Copy only the checkpoint tensors whose key AND shape
        #   match the current model, so a partially-compatible
        #   checkpoint still loads cleanly.
        # ------------------------------------------------------#
        model_dict = model.state_dict()
        # NOTE(review): torch.load without weights_only=True unpickles
        # arbitrary objects — only load checkpoints from trusted sources.
        pretrained_dict = torch.load(model_path, map_location=device)
        load_key, no_load_key, temp_dict = [], [], {}
        for k, v in pretrained_dict.items():
            if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
                temp_dict[k] = v
                load_key.append(k)
            else:
                no_load_key.append(k)
        model_dict.update(temp_dict)
        model.load_state_dict(model_dict)

    # ----------------------#
    #   Loss criterion: pairwise Contrastive Loss.
    # ----------------------#
    loss = ContrastiveLoss(margin=margin)

    # Switch the model to training mode (enables dropout/batch-norm updates).
    model_train = model.train()

    # Fixed seed so the train split shuffle is reproducible across runs;
    # the RNG is re-seeded from entropy afterwards so later randomness varies.
    with open(annotation_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    np.random.seed(52013)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_train = len(lines)

    # Siamese pairs require an even batch size.
    if batch_size % 2 != 0:
        raise ValueError("Batch_size must be the multiple of 2.")
    # -------------------------------------------------------------------#
    #   Scale the learning rate with batch_size (linear scaling rule),
    #   clamped to optimizer-specific safe bounds.
    # -------------------------------------------------------------------#
    nbs = 64
    lr_limit_max = 1e-3 if optimizer_type == 'adam' else 1e-1
    lr_limit_min = 3e-4 if optimizer_type == 'adam' else 5e-4
    Init_lr_fit = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
    Min_lr_fit = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)

    # ---------------------------------------#
    #   Select the optimizer from optimizer_type.
    # ---------------------------------------#
    optimizer = {
        'adam': optim.Adam(model.parameters(), Init_lr_fit, betas=(momentum, 0.999), weight_decay=weight_decay),
        'sgd': optim.SGD(model.parameters(), Init_lr_fit, momentum=momentum, nesterov=True, weight_decay=weight_decay)
    }[optimizer_type]

    # ---------------------------------------#
    #   Learning-rate schedule: maps epoch -> lr.
    # ---------------------------------------#
    lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, Epoch)

    # ---------------------------------------#
    #   Steps per epoch; abort if the dataset cannot fill one batch.
    # ---------------------------------------#
    epoch_step = num_train // batch_size             # number of training steps per epoch
    if epoch_step == 0:
        raise ValueError("数据集过小，无法继续进行训练，请扩充数据集。")

    # ---------------------------------------#
    #   Build the dataset and its loader.
    # ---------------------------------------#
    train_dataset = SiameseNetworkDataset(input_shape, lines[:num_train], random=True)

    # Each dataset item is an image pair, so the loader batch size is halved
    # to keep the effective sample count equal to batch_size.
    gen = DataLoader(train_dataset, shuffle=True, batch_size=batch_size // 2, num_workers=num_workers,
                     pin_memory=True, drop_last=True, sampler=None)

    # Main training loop.
    for epoch in range(Init_Epoch, Epoch):
        # Apply this epoch's scheduled learning rate.
        set_optimizer_lr(optimizer, lr_scheduler_func, epoch)
        # Run one full pass over the training data (also handles checkpointing
        # every save_period epochs into save_dir).
        train_one_epoch(model_train=model_train, model=model, loss=loss, optimizer=optimizer, epoch=epoch,
                        epoch_step=epoch_step, gen=gen, Epoch=Epoch, cuda=Cuda, Batch_size=batch_size // 2,
                        fp16=False, scaler=None, save_period=save_period, save_dir=save_dir, local_rank=local_rank)
