# Training module
import argparse
from utils import utils
import config as cfg
from resnet import *
from models import *
import torch
from torch.utils.data import DataLoader
from dataset import WEBFACE_DATASET_REC
from test import *
from tqdm import tqdm
from evaluate import evlauate_test_data
import os

def parse_config(args, cfg):
    """Overwrite the parsed CLI arguments in-place with values from the
    config module, so config settings take priority over command-line
    defaults.
    """
    # Attributes that share the same name on both args and cfg.
    _shared = (
        'weights_to_load', 'backbone_net', 'data', 'end_epoch', 'lr',
        'lr_step', 'optimizer', 'batch_size', 'use_gpu', 'img_number',
        'metric', 's', 'm', 'test_batchsize', 'feature_dim',
        'datapath', 'lfw_root', 'lr_scheduler',
    )
    for attr in _shared:
        setattr(args, attr, getattr(cfg, attr))

    # Attributes whose names differ between args and cfg.
    args.logout = cfg.log_out
    args.version_ref = cfg.description
    args.img_class = cfg.img_cls

def main():
    """Entry point: parse CLI arguments (then override them with the config
    module), build the backbone network and ArcFace metric head, optionally
    resume from a checkpoint, and run the training loop. After each epoch the
    model is evaluated on the LFW pairs and the best-accuracy weights are
    checkpointed to ./weights/.
    """
    parser = argparse.ArgumentParser(description='Train face network')
    # general
    parser.add_argument('--weights-to-load', type=str, default=None, help='where to load weights')
    parser.add_argument('--backbone-net', default='resnet18', help='specify network')
    parser.add_argument('--data', type=str, default='VOC2012', help='*.data path')
    parser.add_argument('--end-epoch', type=int, default=1000, help='training epoch size.')
    parser.add_argument('--lr', type=float, default=0.1, help='start learning rate')
    parser.add_argument('--lr-step', type=int, default=10, help='period of learning rate decay')
    parser.add_argument('--optimizer', default='sgd', help='optimizer')
    parser.add_argument('--logout', default='train_log.log', help='where to save log')
    parser.add_argument('--batch-size', type=int, default=10, help='training batch size')
    parser.add_argument('--use-gpu', type=bool, default=True, help='...')
    parser.add_argument('--version-ref', type=str, help='...')
    parser.add_argument('--img-number', type=int, help='...')
    parser.add_argument('--img-class', type=int, help='...')
    # Default matches the 'insight_face' branch below; the original default
    # 'insight face' (with a space) could never match it.
    parser.add_argument('--metric', type=str, default='insight_face', help='...')
    parser.add_argument('--s', type=float, default=30, help='hyper-parameter of arcface')
    parser.add_argument('--m', type=float, default=0.5, help='hyper-parameter of arcface')
    parser.add_argument('--test-batchsize', type=int, default=30, help='test batch size')
    parser.add_argument('--feature-dim', type=int, default=512, help='output dimesions of backbone net')
    parser.add_argument('--datapath', type=str, default='../data/', help='train data path')
    parser.add_argument('--lfw-root', type=str, default='../data/', help='test data path')

    args = parser.parse_args()
    parse_config(args, cfg)  # config module values take priority over CLI defaults

    logger = utils.Logger(args.logout).logger
    logger.log(level=20, msg='\t\n')
    logger.info("TRAIN START")
    logger.info("===========Load config===========")
    # str(args) looks like "Namespace(a=1, b=2, ...)"; strip the wrapper and
    # log one "key=value" item per line.
    arg_str = str(args)[10:-1].split(',')
    for item in arg_str:
        logger.info(item.lstrip())
    logger.info("=================================")

    # Build the backbone network. Fail fast on an unknown name instead of
    # crashing later with an undefined `model`.
    if args.backbone_net == 'resnet18':
        model = resnet_face18(use_se=False)
    elif args.backbone_net == 'resnet34':
        model = resnet34()
    elif args.backbone_net == 'resnet50':
        model = resnet50()
    else:
        raise ValueError(f"unsupported backbone net: {args.backbone_net}")

    # Metric head (margin-based classification layer). The original silently
    # passed for unknown metrics, leaving `out_model` undefined.
    if args.metric == 'insight_face':
        out_model = ArcMarginProduct(args.feature_dim, args.img_class, args.s, args.m)
    else:
        raise ValueError(f"unsupported metric: {args.metric}")

    best_acc = 0
    # Optionally resume model/head weights from a previous checkpoint.
    if args.weights_to_load is not None:
        checkpoint = torch.load(args.weights_to_load, map_location='cpu')
        model.load_state_dict(checkpoint['backbone'])
        out_model.load_state_dict(checkpoint['outmodel'])
        # NOTE(review): the stored best acc / epoch are deliberately reset so
        # the resumed run re-validates from scratch (the printed epoch is
        # therefore always 0).
        best_acc = 0
        checkpoint['epoch'] = 0

        print(
            f"load weights ok ,epoch:{checkpoint['epoch']} ,loss: {checkpoint['loss']},"
            f"acc:{checkpoint['best acc']},threshold:{checkpoint['threshold']}")
    else:
        checkpoint = None

    # Training dataset. Fail fast: the loop below needs `train_dataloader`.
    if args.data == 'CASIA-Webface':
        train_dataset = WEBFACE_DATASET_REC(args.img_number, args.datapath)
        train_dataloader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=1)
    else:
        raise ValueError(f"unsupported dataset: {args.data}")

    if args.use_gpu:
        model = model.cuda()
        out_model = out_model.cuda()

    # Optimizer over both the backbone and the metric head.
    if args.optimizer == 'adam':
        opt = torch.optim.Adam([{'params': model.parameters()}, {'params': out_model.parameters()}])
    elif args.optimizer == 'sgd':
        # Use the configured learning rate instead of the hard-coded 0.1
        # (same value under defaults, but now honours args.lr).
        opt = torch.optim.SGD([{'params': model.parameters()}, {'params': out_model.parameters()}],
                              lr=args.lr, momentum=0.9)
    else:
        raise ValueError(f"unsupported optimizer: {args.optimizer}")
    if checkpoint is not None:
        opt.load_state_dict(checkpoint['opt'])

    # Learning-rate schedule. `lr_scheduler.step()` runs every epoch, so an
    # unknown scheduler name must fail here, not with a NameError later.
    if args.lr_scheduler == 'cosine-annealing':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=10, eta_min=1e-4)
    else:
        raise ValueError(f"unsupported lr scheduler: {args.lr_scheduler}")

    # Test set: LFW verification pairs. `img_paths` is only consumed by the
    # alternative lfw_test() evaluation path; kept for parity with it.
    identity_list = get_lfw_list(r"lfw_test_pair.txt")
    img_paths = [os.path.join(args.lfw_root, each) for each in identity_list]

    # Classification loss over the margin logits.
    criterion = torch.nn.CrossEntropyLoss()
    sum_loss = 0
    for i in range(args.end_epoch):
        model.train()

        for ii, data in enumerate(tqdm(train_dataloader)):
            data_input, label = data

            if args.use_gpu:
                data_input = data_input.cuda()
                label = label.cuda().long()
            feature = model(data_input)          # backbone embedding
            output = out_model(feature, label)   # margin-adjusted logits
            loss = criterion(output, label)
            opt.zero_grad()
            loss.backward()
            opt.step()

            iters = i * len(train_dataloader) + ii
            sum_loss += loss.cpu().detach().item()
            if iters % 1000 == 0:
                # Log running loss plus the accuracy on the current batch.
                output = output.data.cpu().numpy()
                output = np.argmax(output, axis=1)
                label = label.data.cpu().numpy()
                acc = np.mean((output == label).astype(int))
                logger.info(f"epochs:{i},iterations:{iters},loss {sum_loss/1000},accurate {acc}")
                sum_loss = 0
        model.eval()
        # Evaluate on the LFW .bin pairs (note: imported name is misspelled
        # at its definition site, so it must stay as-is here).
        cur_acc, th = evlauate_test_data(model, r"e:\faces_webface_112x112\lfw.bin")

        # Checkpoint only the best-accuracy weights.
        if cur_acc >= best_acc:
            best_acc = cur_acc
            save_para = {
                'backbone': model.state_dict(),
                'outmodel': out_model.state_dict(),
                'opt': opt.state_dict(),
                'best acc': best_acc,
                'threshold': th,
                'loss': loss.detach().cpu().item(),
                'epoch': i,
            }
            os.makedirs('./weights', exist_ok=True)  # torch.save does not create directories
            torch.save(save_para, f'./weights/{cfg.version}_best.pth')
            logger.info("===========Save weights===========")
            logger.info(f"got best test acc: {best_acc},loss: {save_para['loss']}, threshold: {th},save pt ok")
            logger.info("==================================")
        lr_scheduler.step()

# Script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
