"""
Train Retinaface_mobilenet0.25.
"""

import argparse
import math
import mindspore

from mindspore import context
from mindspore.context import ParallelMode
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.communication.management import init, get_rank
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from src.config import cfg_mobile025
from src.loss import MultiBoxLoss
from src.dataset import create_dataset
from src.lr_schedule import adjust_learning_rate, warmup_cosine_annealing_lr


def train_with_mobilenet(cfg):
    """Train RetinaFace with a MobileNet-0.25 backbone.

    Builds the dataset, network, loss, learning-rate schedule and optimizer
    from ``cfg``, then runs ``Model.train`` with checkpoint/time/loss callbacks.

    Args:
        cfg (dict): Training configuration (see ``src.config.cfg_mobile025``).
            NOTE: this function mutates ``cfg['ckpt_path']`` by appending a
            per-rank subdirectory suffix.

    Raises:
        ValueError: If ``cfg['lr_type']`` or ``cfg['optim']`` is not one of the
            supported values.
    """
    # Fix the global random seed for reproducibility.
    mindspore.common.seed.set_seed(cfg['seed'])
    # Imported lazily: the network module pulls in heavy graph-construction code.
    from src.network_with_mobilenet import RetinaFace, RetinaFaceWithLossCell, TrainingWrapper, mobilenet025
    # Configure the target execution backend (graph mode).
    context.set_context(mode=context.GRAPH_MODE, device_target=cfg['device_target'])
    device_num = cfg['nnpu']
    rank = 0
    if cfg['device_target'] == "Ascend":
        if device_num > 1:
            # Multi-device run: enable data-parallel training with gradient averaging.
            context.reset_auto_parallel_context()
            context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
            init()
            rank = get_rank()
        else:
            # Single device: pin execution to the configured device id.
            context.set_context(device_id=cfg['device_id'])
    # Read hyper-parameters from the configuration.
    batch_size = cfg['batch_size']
    max_epoch = cfg['epoch']
    momentum = cfg['momentum']
    lr_type = cfg['lr_type']
    weight_decay = cfg['weight_decay']
    loss_scale = cfg['loss_scale']
    initial_lr = cfg['initial_lr']
    gamma = cfg['gamma']
    T_max = cfg['T_max']
    eta_min = cfg['eta_min']
    training_dataset = cfg['training_dataset']
    num_classes = 2
    negative_ratio = 7
    stepvalues = (cfg['decay1'], cfg['decay2'])
    # Build the training dataset.
    ds_train = create_dataset(training_dataset, cfg, batch_size, multiprocessing=True, num_worker=cfg['num_workers'])
    print('dataset size is : \n', ds_train.get_dataset_size())
    # Iterations per epoch. get_dataset_size() already returns an integer,
    # so the previous math.ceil() wrapper was a no-op and has been removed.
    steps_per_epoch = ds_train.get_dataset_size()
    # Multibox loss with hard negative mining (negatives:positives = 7:1).
    multibox_loss = MultiBoxLoss(num_classes, cfg['num_anchor'], negative_ratio, cfg['batch_size'])
    # Build the backbone; 1001 matches the ImageNet pretraining head.
    backbone = mobilenet025(1001)
    backbone.set_train(True)
    # Optionally load pretrained backbone weights (skipped when resuming).
    if cfg['pretrain'] and cfg['resume_net'] is None:
        pretrained_mobile025 = cfg['pretrain_path']
        param_dict_mobile025 = load_checkpoint(pretrained_mobile025)
        load_param_into_net(backbone, param_dict_mobile025)
        print('Load mobilenet0.25 from [{}] done.'.format(pretrained_mobile025))
    # Wrap the backbone in RetinaFace and set training mode.
    net = RetinaFace(phase='train', backbone=backbone, cfg=cfg)
    net.set_train(True)
    # Optionally resume a full RetinaFace checkpoint.
    if cfg['resume_net'] is not None:
        pretrain_model_path = cfg['resume_net']
        param_dict_retinaface = load_checkpoint(pretrain_model_path)
        load_param_into_net(net, param_dict_retinaface)
        print('Resume Model from [{}] Done.'.format(cfg['resume_net']))
    # Fuse network and loss into a single training cell.
    net = RetinaFaceWithLossCell(net, multibox_loss, cfg)
    # Build the learning-rate schedule.
    if lr_type == 'dynamic_lr':
        lr = adjust_learning_rate(initial_lr, gamma, stepvalues, steps_per_epoch, max_epoch,
                                  warmup_epoch=cfg['warmup_epoch'], lr_type1=lr_type)
    elif lr_type == 'cosine_annealing':
        lr = warmup_cosine_annealing_lr(initial_lr, steps_per_epoch, cfg['warmup_epoch'], max_epoch, T_max, eta_min)
    else:
        # Previously an unrecognized lr_type fell through silently, leaving
        # `lr` unbound and crashing later with a NameError. Fail fast instead.
        raise ValueError("Unsupported lr_type: {!r} (expected 'dynamic_lr' or 'cosine_annealing').".format(lr_type))
    # Build the optimizer.
    if cfg['optim'] == 'momentum':
        opt = mindspore.nn.Momentum(net.trainable_params(), lr, momentum, weight_decay, loss_scale)
    elif cfg['optim'] == 'sgd':
        opt = mindspore.nn.SGD(params=net.trainable_params(), learning_rate=lr, momentum=momentum,
                               weight_decay=weight_decay, loss_scale=loss_scale)
    else:
        raise ValueError("Unsupported optim: {!r} (expected 'momentum' or 'sgd').".format(cfg['optim']))
    # Wrap network + optimizer into the training graph.
    net = TrainingWrapper(net, opt)
    model = Model(net)
    # Checkpoint configuration: save once per epoch, per-rank directory.
    config_ck = CheckpointConfig(save_checkpoint_steps=ds_train.get_dataset_size() * 1,
                                 keep_checkpoint_max=cfg['keep_checkpoint_max'])
    cfg['ckpt_path'] = cfg['ckpt_path'] + "ckpt_" + str(rank) + "/"
    ckpoint_cb = ModelCheckpoint(prefix="RetinaFace", directory=cfg['ckpt_path'], config=config_ck)
    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
    callback_list = [LossMonitor(), time_cb, ckpoint_cb]
    # Launch training.
    print("============== Starting Training ==============")
    model.train(max_epoch, ds_train, callbacks=callback_list)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='train')
    parser.add_argument('--backbone_name', type=str, default='MobileNet025',
                        help='backbone name')
    args_opt = parser.parse_args()
    config = cfg_mobile025
    # Print the configuration BEFORE launching the (potentially very long)
    # training run so it can be inspected up front. The previous order printed
    # it only after training finished, and by then train_with_mobilenet had
    # already mutated config['ckpt_path'], so the original settings were lost.
    print('train config:\n', config)
    train_with_mobilenet(cfg=config)
