import os
import argparse
import random

import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader

from utils.utils import get_lr

from models.facenet import Facenet
from models.facenet_training import (get_lr_scheduler, set_optimizer_lr,
                                   triplet_loss, weights_init)
from utils.callback import LossHistory
from utils.dataloader import FacenetDataset_re
from utils.utils import show_config

def parse_option():
    """Parse command-line options for Facenet classifier training.

    Returns:
        argparse.Namespace with model/train/data/loss/save settings.
        Unknown arguments are tolerated (parse_known_args) so the script
        can run under wrappers that inject extra flags.
    """
    parser = argparse.ArgumentParser()

    # Model
    parser.add_argument('--backbone', default='inception_resnetv1', type=str, help='mobilenet or inception_resnetv1')
    parser.add_argument('--model_path', default="model_data/facenet_inception_resnetv1.pth", type=str, help='path for model to load')
    parser.add_argument('--pretrained', action='store_true', help='whether to use pretrained backbone')

    # Train
    # NOTE: store_false means Cuda defaults to True; passing --Cuda DISABLES it.
    parser.add_argument('--Cuda', action='store_false', help='whether to use cuda')
    parser.add_argument('--distributed', action='store_true', help='whether to use distributed')
    parser.add_argument('--batch_size', default=64, type=int, help='batch_size set')
    parser.add_argument('--Epoch', default=120, type=int, help='Epoch set')

    # Data
    parser.add_argument('--annotation_path', default="cls_train.txt", type=str, help="annotation_path(txt)")
    parser.add_argument('--input_shape', default=160, type=int, help="input_shape --> (160,160)")
    parser.add_argument('--num_workers', default=12, type=int, help="num_workers = 12")
    parser.add_argument('--num_classes', default=311, type=int, help="num_classes = 311")
    parser.add_argument('--num_train', default=5350, type=int, help="num_train to split annotation_path")
    parser.add_argument('--fp16', action='store_true', help='fp16')

    # Loss
    parser.add_argument('--optimizer_type', default='adam', type=str, help='adam or sgd')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum for sgd / beta1 for adam')
    # BUG FIX: was type=int, which rejects/truncates fractional values such as 5e-4.
    parser.add_argument('--weight_decay', default=0, type=float, help="weight_decay for optimizer(set 0)")
    parser.add_argument('--Init_lr', type=float, default=0.001, help='Init_lr for adam')
    parser.add_argument('--Min_lr', type=float, default=0.00001, help='Min_lr for adam')
    parser.add_argument('--lr_decay_type', default='cos', type=str, help='type for scheduler(cos or step)')

    # Save
    parser.add_argument('--save_period', default=1, type=int, help="save log & model per epoch")
    parser.add_argument('--save_dir', default='logs', type=str, help='where to save log & model')

    args, unparsed = parser.parse_known_args()

    return args

def seed_all(seed_value):
    """Seed every RNG (Python, NumPy, PyTorch CPU/GPU) for reproducible runs.

    Args:
        seed_value: integer seed applied to all generators.
    """
    random.seed(seed_value)     # Python stdlib RNG
    np.random.seed(seed_value)  # NumPy (CPU) RNG
    torch.manual_seed(seed_value)  # PyTorch CPU RNG

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)  # all visible GPUs
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.deterministic = True  # needed for reproducibility
        # BUG FIX: was True. benchmark=True lets cuDNN autotune and pick
        # algorithms non-deterministically, defeating deterministic=True.
        torch.backends.cudnn.benchmark = False

def get_loader(args, ngpus_per_node):
    """Build train/validation DataLoaders from the annotation file.

    Splits the annotation lines at args.num_train and sets args.num_val
    as a side effect (main() relies on it for epoch_step_val).

    Args:
        args: parsed CLI namespace (annotation_path, num_train, batch_size, ...).
        ngpus_per_node: GPU count; divides the batch size under DDP.

    Returns:
        (gen, gen_val): training and validation DataLoaders.
    """
    with open(args.annotation_path, "r") as f:
        lines = f.readlines()

    args.num_val = len(lines) - args.num_train
    input_shape = (args.input_shape, args.input_shape)

    train_dataset   = FacenetDataset_re(input_shape, lines[:args.num_train], args.num_classes, mode='train')
    val_dataset     = FacenetDataset_re(input_shape, lines[args.num_train:], args.num_classes, mode='test')

    batch_size = args.batch_size
    if args.distributed:
        train_sampler   = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
        val_sampler     = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
        # BUG FIX: originally read the unassigned local `batch_size`
        # (UnboundLocalError) and the per-process size was never used by the
        # loaders. Split the global batch evenly across processes.
        batch_size      = batch_size // ngpus_per_node
        # The DistributedSampler does the shuffling; DataLoader must not.
        shuffle         = False
    else:
        train_sampler   = None
        val_sampler     = None
        shuffle         = True

    def seed_worker(worker_id):
        # Derive each worker's NumPy seed from torch's per-worker seed so
        # augmentation randomness is reproducible across runs.
        worker_seed = torch.initial_seed() % 2**32
        np.random.seed(worker_seed)

    gen = DataLoader(train_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=args.num_workers,
                     pin_memory=True, drop_last=True, worker_init_fn=seed_worker, sampler=train_sampler)
    gen_val = DataLoader(val_dataset, shuffle=shuffle, batch_size=batch_size, num_workers=args.num_workers,
                         pin_memory=True, drop_last=True, worker_init_fn=seed_worker, sampler=val_sampler)

    return gen, gen_val


def get_model(args, local_rank, device):
    """Build the Facenet model, optionally load checkpoint weights, wrap for CUDA/DDP.

    Args:
        args: parsed CLI namespace (backbone, num_classes, model_path, Cuda, distributed).
        local_rank: this process's device index; rank 0 does the logging.
        device: map_location target for torch.load.

    Returns:
        (model_train, model): the (possibly DP/DDP-wrapped) training handle and
        the underlying module used for saving weights.
    """
    model = Facenet(backbone=args.backbone, num_classes=args.num_classes, pretrained=args.pretrained)

    if args.model_path != '':
        if local_rank == 0:
            print('Load weights {}.'.format(args.model_path))

        #------------------------------------------------------#
        #   Load only checkpoint entries whose key AND shape match the model
        #------------------------------------------------------#
        model_dict      = model.state_dict()
        # map_location controls which device the checkpoint tensors land on
        pretrained_dict = torch.load(args.model_path, map_location = device)

        load_key, no_load_key, temp_dict = [], [], {}
        for k, v in pretrained_dict.items():
            if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
                temp_dict[k] = v
                load_key.append(k)
            else:
                no_load_key.append(k)
        model_dict.update(temp_dict)
        model.load_state_dict(model_dict)
        #------------------------------------------------------#
        #   Report the keys that did not match
        #------------------------------------------------------#
        if local_rank == 0:
            print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key))
            print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key))
            print("\n\033[1;33;44m温馨提示，head部分没有载入是正常现象，Backbone部分没有载入是错误的。\033[0m")

    # BUG FIX: everything below was nested inside `if args.model_path != ''`,
    # so training from scratch (model_path='') silently returned None.
    model_train = model.train()

    if args.Cuda:
        if args.distributed:
            #----------------------------#
            #   Multi-GPU distributed training (one process per device)
            #----------------------------#
            model_train = model_train.cuda(local_rank)
            model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True)
        else:
            model_train = torch.nn.DataParallel(model)
            cudnn.benchmark = True
            model_train = model_train.cuda()

    return model_train, model

    
def get_loss_opti(args, model):
    """Build the optimizer, per-epoch LR schedule function, and loss logger.

    The initial/minimum learning rates are scaled by batch_size/64 and clamped
    into an optimizer-specific range before being handed to the scheduler.

    Args:
        args: parsed CLI namespace (optimizer_type, batch_size, lr settings, ...).
        model: the Facenet module whose parameters are optimized.

    Returns:
        (optimizer, lr_scheduler_func, loss_history)

    Raises:
        KeyError: if args.optimizer_type is neither 'adam' nor 'sgd'.
    """
    nbs             = 64
    lr_limit_max    = 1e-3 if args.optimizer_type == 'adam' else 1e-1
    lr_limit_min    = 3e-4 if args.optimizer_type == 'adam' else 5e-4
    Init_lr_fit     = min(max(args.batch_size / nbs * args.Init_lr, lr_limit_min), lr_limit_max)
    Min_lr_fit      = min(max(args.batch_size / nbs * args.Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)

    #---------------------------------------#
    #   Select the optimizer by optimizer_type
    #   (was a dict literal that instantiated BOTH optimizers every call)
    #---------------------------------------#
    if args.optimizer_type == 'adam':
        optimizer = optim.Adam(model.parameters(), Init_lr_fit, betas=(args.momentum, 0.999), weight_decay=args.weight_decay)
    elif args.optimizer_type == 'sgd':
        optimizer = optim.SGD(model.parameters(), Init_lr_fit, momentum=args.momentum, nesterov=True, weight_decay=args.weight_decay)
    else:
        # Preserve the original dict-lookup failure mode for unknown types.
        raise KeyError(args.optimizer_type)

    #---------------------------------------#
    #   Learning-rate decay function (cos or step)
    #---------------------------------------#
    lr_scheduler_func = get_lr_scheduler(args.lr_decay_type, Init_lr_fit, Min_lr_fit, args.Epoch)

    #----------------------#
    #   Loss logging
    #----------------------#
    input_shape = [160, 160, 3]
    # Removed the dead `if True:`/`else` left over from a commented-out
    # `if local_rank == 0` guard; as before, every process creates a
    # LossHistory. NOTE(review): under DDP you may want rank-0 only — confirm.
    loss_history = LossHistory(args.save_dir, model, input_shape=input_shape)

    return optimizer, lr_scheduler_func, loss_history


def main(args):
    """Set up device(s) and AMP, build loaders/model/optimizer, then run all epochs.

    Args:
        args: namespace from parse_option(). args.num_val is filled in by
              get_loader() as a side effect before it is used below.
    """
    ngpus_per_node  = torch.cuda.device_count()
    # #------------------------------------------------------#
    # #   Select the GPU device(s) used for this process
    # #------------------------------------------------------#  
    if args.distributed:
        dist.init_process_group(backend="nccl")
        # torchrun/launch export LOCAL_RANK and RANK for each worker process
        local_rank  = int(os.environ["LOCAL_RANK"])
        rank        = int(os.environ["RANK"])
        device      = torch.device("cuda", local_rank)
        if local_rank == 0:
            print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...")
            print("Gpu Device Count : ", ngpus_per_node)
    else:
        device          = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        local_rank      = 0
        rank            = 0

    # Mixed precision: GradScaler handles loss scaling when fp16 is enabled.
    if args.fp16:
        from torch.cuda.amp import GradScaler as GradScaler
        scaler = GradScaler()
    else:
        scaler = None

    # LFW evaluation is disabled in this script (flag and loader are inert).
    lfw_eval_flag   = False
    LFW_loader =  None
    train_loader, test_loader = get_loader(args,ngpus_per_node)

    model_train, model = get_model(args,local_rank,device)

    optimizer, lr_scheduler_func, loss_history = get_loss_opti(args,model)

    # Number of batches per training / validation epoch (drop_last semantics).
    epoch_step      = args.num_train // args.batch_size
    epoch_step_val  = args.num_val // args.batch_size

    # Print the effective configuration
    show_config(
        num_classes = args.num_classes, backbone = args.backbone, model_path = args.model_path, input_shape = args.input_shape, \
        Init_Epoch = 0, Epoch = args.Epoch, batch_size = args.batch_size, \
        Init_lr = args.Init_lr, Min_lr = args.Min_lr, optimizer_type = args.optimizer_type, momentum = args.momentum, lr_decay_type = args.lr_decay_type, \
        save_period = args.save_period, save_dir = args.save_dir, num_workers = args.num_workers, num_train = args.num_train, num_val = args.num_val
    )
    
    for epoch in range(0, args.Epoch):
        if args.distributed:
            # Reshuffle the distributed sampler differently each epoch
            train_loader.sampler.set_epoch(epoch)
        # lr_scheduler_func computes this epoch's learning rate (decay schedule)
        set_optimizer_lr(optimizer, lr_scheduler_func, epoch)
        
        fit_one_epoch(model_train, model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, train_loader, \
        test_loader, args.Epoch, args.Cuda, None, args.batch_size, lfw_eval_flag, args.fp16, scaler, args.save_period, \
        args.save_dir, local_rank)
    print('\n')
    if local_rank == 0:
        loss_history.writer.close()


def fit_one_epoch(model_train, model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, test_loader, Batch_size, lfw_eval_flag, fp16, scaler, save_period, save_dir, local_rank):
    '''
    Run one training epoch followed by one validation pass, log the metrics,
    and periodically save the model weights.

        model_train: handle used for forward/backward (possibly DP/DDP-wrapped)
        model: underlying module, used when saving weights
        loss_history: object that records the training log
        loss: triplet_loss() (currently commented out below)
        optimizer: optimizer (adam by default)
        epoch: current epoch index (0-based)
        epoch_step: number of batches per training epoch
        epoch_step_val: number of batches per validation epoch
        gen: DataLoader for the training set
        gen_val: DataLoader for the validation set
        Epoch: total number of epochs
        cuda: whether to move batches onto the GPU
        test_loader: LFW_loader (passed as None by main; unused here)
        Batch_size: batch size (only needed by the commented-out triplet loss)
        lfw_eval_flag: whether to run LFW evaluation (unused here)
        fp16: whether to use mixed-precision training to reduce GPU memory
        scaler: GradScaler when fp16 is enabled, else None
        save_period: save weights every `save_period` epochs
        save_dir: directory where weights and logs are stored
        local_rank: process rank; rank 0 owns progress bars, logging and saving
    '''
    total_CE_loss       = 0
    total_accuracy      = 0

    val_total_CE_loss       = 0
    val_total_accuracy      = 0

    if local_rank == 0:
        print('Start Train')
        pbar = tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)
        
    # Switch the model to training mode
    model_train.train()
    
    # Training loop
    for iteration, batch in enumerate(gen):
        # Guard against the loader yielding more batches than expected
        if iteration >= epoch_step:
            break
        
        # A batch is [3,3,W,H] images and [c1,c2,c3] labels: three images, three labels
        images, labels = batch
        
        # Move the batch to the GPU (no_grad: pure data movement, no autograd)
        with torch.no_grad():
            if cuda:
                images  = images.cuda(local_rank)
                labels  = labels.cuda(local_rank)
        
        # Clear accumulated gradients
        optimizer.zero_grad()
        
        if not fp16:
            # def forward(self, x, mode = "predict")
            # outputs1 would be the 128-d embedding; outputs2 is the
            # num_classes-d classification vector (only outputs2 is used here)
            _, outputs2 = model_train(images, "train")
            
            # _triplet_loss   = loss(outputs1, Batch_size)
            # Cross-entropy-style loss to help the model converge
            _CE_loss        = nn.NLLLoss()(F.log_softmax(outputs2, dim = -1), labels)
            _loss           = _CE_loss

            _loss.backward()
            optimizer.step()      
        else:
            from torch.cuda.amp import autocast
            with autocast():
                outputs1, outputs2 = model_train(images, "train")

                # _triplet_loss   = loss(outputs1, Batch_size)
                _CE_loss        = nn.NLLLoss()(F.log_softmax(outputs2, dim = -1), labels)
                _loss           = _CE_loss
            #----------------------#
            #   Scaled backward pass for mixed precision
            #----------------------#
            scaler.scale(_loss).backward()
            scaler.step(optimizer)
            scaler.update()  

        with torch.no_grad():
            # Batch classification accuracy (computed on CPU via FloatTensor cast)
            accuracy = torch.mean((torch.argmax(F.softmax(outputs2, dim=-1), dim=-1) == labels).type(torch.FloatTensor))
        
        # Accumulate loss and accuracy statistics
        # total_triple_loss   += _triplet_loss.item()
        total_CE_loss       += _CE_loss.item()
        total_accuracy      += accuracy.item()

        if local_rank == 0:
            pbar.set_postfix(**{# 'total_triple_loss' : total_triple_loss / (iteration + 1), 
                                'total_CE_loss'     : total_CE_loss / (iteration + 1), 
                                'accuracy'          : total_accuracy / (iteration + 1), 
                                'lr'                : get_lr(optimizer)})
            pbar.update(1)
    
    # End of the training phase
    if local_rank == 0:
        pbar.close()
        print('Finish Train')
        print('Start Validation')
        pbar = tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)
    model_train.eval()
    
    # Validation loop
    for iteration, batch in enumerate(gen_val):
        # Guard against the loader yielding more batches than expected
        if iteration >= epoch_step_val:
            break
        
        images, labels = batch
        with torch.no_grad():
            if cuda:
                images  = images.cuda(local_rank)
                labels  = labels.cuda(local_rank)

            optimizer.zero_grad()
            
            # Forward in "train" mode but with no gradient step:
            # "predict" mode returns only the embedding, and the
            # classification output is needed to score validation
            outputs1, outputs2 = model_train(images, "train")
            
            # _triplet_loss   = loss(outputs1, Batch_size)
            _CE_loss        = nn.NLLLoss()(F.log_softmax(outputs2, dim = -1), labels)
            _loss           = _CE_loss
            
            accuracy        = torch.mean((torch.argmax(F.softmax(outputs2, dim=-1), dim=-1) == labels).type(torch.FloatTensor))
            
            # val_total_triple_loss   += _triplet_loss.item()
            val_total_CE_loss       += _CE_loss.item()
            val_total_accuracy      += accuracy.item()

        if local_rank == 0:
            pbar.set_postfix(**{# 'val_total_triple_loss' : val_total_triple_loss / (iteration + 1), 
                                'val_total_CE_loss'     : val_total_CE_loss / (iteration + 1), 
                                'val_accuracy'          : val_total_accuracy / (iteration + 1), 
                                'lr'                    : get_lr(optimizer)})
            pbar.update(1)
        
    if local_rank == 0:
        pbar.close()
        print('Finish Validation')

        # Record this epoch's averaged metrics in the training log
        loss_history.append_loss(epoch, \
            total_accuracy / epoch_step, \
            val_total_accuracy / epoch_step_val,\
            (total_CE_loss) / epoch_step, \
            (val_total_CE_loss) / epoch_step_val)
            
        print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
        print('Total Loss: %.4f' % ((total_CE_loss) / epoch_step))
        
        # Save the weights (with save_period=1 every epoch is saved)
        if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:
            torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.3f-val_loss%.3f.pth'%((epoch + 1),
                                                                    (total_CE_loss) / epoch_step,
                                                                    (val_total_CE_loss) / epoch_step_val)),_use_new_zipfile_serialization=False)


if __name__ == "__main__":
    # Parse CLI options first, pin all RNGs to a fixed seed, then train.
    cli_args = parse_option()
    seed_all(42)
    main(cli_args)
