"""
2022.4.20
author：alian
车道线训练代码
"""
import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import torch, os, datetime, time
import torch.backends.cudnn
# Project-local modules
from model.model import parsingNet
from utils_alian.dataloader_alian import get_train_loader  # 自定义
from utils.dist_utils import dist_print, dist_tqdm
from utils.factory import get_metric_dict, get_loss_dict, get_optimizer, get_scheduler
from utils.metrics import update_metrics, reset_metrics
from utils.common import save_model, cp_projects
from utils.common import get_logger
from utils_alian.config import get_args, get_work_dir

os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def inference(net, data_label, use_aux):  # forward pass
    """Run one forward pass and pair the outputs with their labels.

    With ``use_aux`` the batch additionally carries a segmentation label and
    the network returns an auxiliary segmentation head output; everything is
    moved to the GPU before the forward pass.
    """
    if not use_aux:
        img, cls_label = data_label
        img = img.cuda()
        cls_label = cls_label.long().cuda()
        cls_out = net(img)
        return {'cls_out': cls_out, 'cls_label': cls_label}
    img, cls_label, seg_label = data_label
    img = img.cuda()
    cls_label = cls_label.long().cuda()
    seg_label = seg_label.long().cuda()
    cls_out, seg_out = net(img)
    return {'cls_out': cls_out, 'cls_label': cls_label,
            'seg_out': seg_out, 'seg_label': seg_label}


def resolve_val_data(results, use_aux):  # collapse logits to predictions
    """Replace raw logits in ``results`` with hard argmax predictions.

    ``cls_out`` (and ``seg_out`` when ``use_aux`` is set) are reduced along
    dim 1 in place; the mutated dict is also returned for convenience.
    """
    keys = ('cls_out', 'seg_out') if use_aux else ('cls_out',)
    for key in keys:
        results[key] = torch.argmax(results[key], dim=1)
    return results


def calc_loss(loss_dict, results, logger, global_step):  # weighted total loss
    """Evaluate every configured loss term and return their weighted sum.

    Term ``i`` pulls its inputs from ``results`` using the keys listed in
    ``loss_dict['data_src'][i]``; the unweighted value of each term is logged
    every 20 global steps.
    """
    total = 0  # running weighted sum

    for idx, term_name in enumerate(loss_dict['name']):
        inputs = tuple(results[key] for key in loss_dict['data_src'][idx])
        term = loss_dict['op'][idx](*inputs)
        if global_step % 20 == 0:
            logger.add_scalar('loss/' + term_name, term, global_step)
        total = total + term * loss_dict['weight'][idx]
    return total


def train(net, data_loader, loss_dict, optimizer, scheduler, logger, epoch, metric_dict, use_aux):
    """Run one training epoch over ``data_loader``.

    For every batch: forward pass, weighted loss, backward pass, optimizer
    and scheduler step, then metric/LR logging to TensorBoard (metrics every
    20 global steps, LR every step).

    Args:
        net: model in training mode, already on GPU.
        data_loader: training dataloader for this epoch.
        loss_dict: loss configuration consumed by ``calc_loss``.
        optimizer: optimizer updating ``net``'s parameters.
        scheduler: LR scheduler stepped once per batch with the global step.
        logger: TensorBoard-style logger with ``add_scalar``.
        epoch: current epoch index (used to derive the global step).
        metric_dict: metric configuration for ``update_metrics``.
        use_aux: whether the auxiliary segmentation head is active.
    """
    net.train()
    # BUGFIX: iterate the loader passed as an argument; the previous code read
    # the module-level global `train_loader`, silently ignoring `data_loader`.
    progress_bar = dist_tqdm(data_loader)
    t_data_0 = time.time()  # start of data-loading timer
    for b_idx, data_label in enumerate(progress_bar):
        t_data_1 = time.time()
        reset_metrics(metric_dict)
        global_step = epoch * len(data_loader) + b_idx  # global step counter

        t_net_0 = time.time()
        results = inference(net, data_label, use_aux)

        loss = calc_loss(loss_dict, results, logger, global_step)
        optimizer.zero_grad()   # clear accumulated gradients
        loss.backward()         # backpropagate
        optimizer.step()        # update weights
        scheduler.step(global_step)
        t_net_1 = time.time()

        results = resolve_val_data(results, use_aux)

        update_metrics(metric_dict, results)
        if global_step % 20 == 0:
            for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
                logger.add_scalar('metric/' + me_name, me_op.get(), global_step=global_step)
        logger.add_scalar('meta/lr', optimizer.param_groups[0]['lr'], global_step=global_step)

        # BUGFIX: only touch set_postfix when the progress object supports it
        # (dist_tqdm may return a plain iterator on non-main ranks); previously
        # set_postfix was called unconditionally and `kwargs` could be
        # undefined, raising NameError/AttributeError.
        if hasattr(progress_bar, 'set_postfix'):
            kwargs = {me_name: '%.3f' % me_op.get()
                      for me_name, me_op in zip(metric_dict['name'], metric_dict['op'])}
            progress_bar.set_postfix(loss='%.3f' % float(loss),
                                     data_time='%.3f' % float(t_data_1 - t_data_0),
                                     net_time='%.3f' % float(t_net_1 - t_net_0),
                                     **kwargs)
        t_data_0 = time.time()


if __name__ == "__main__":  # 实例化
    torch.backends.cudnn.benchmark = True  # 若为 True，则cuDNN对多个卷积算法进行基准测试并选择最快的
    opt = get_args()
    work_dir = get_work_dir(opt)  # 创建权重保存路径
    distributed = False  # 是否分布式训练
    if 'WORLD_SIZE' in os.environ:
        distributed = int(os.environ['WORLD_SIZE']) > 1
    if distributed:
        import torch.distributed

        torch.cuda.set_device(0)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
    dist_print(datetime.datetime.now().strftime('[%Y/%m/%d %H:%M:%S]') + ' start training...')
    dist_print(opt)
    assert opt.backbone in ['18', '34', '50', '101', '152', '50next', '101next', '50wide', '101wide']

    # 加载数据集
    train_loader = get_train_loader(opt.batch_size, opt.source, opt.griding_num, opt.use_aux, distributed,
                                    opt.num_lanes)
    # 加载模型
    net = parsingNet(pretrained=True, backbone=opt.backbone,
                     cls_dim=(opt.griding_num + 1, opt.row_anchor, opt.num_lanes), use_aux=opt.use_aux).cuda()

    if distributed:  # 分布式数据并行
        net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[opt.local_rank])
    # 构建优化器
    optimizer = get_optimizer(net, opt)

    if opt.resume is not None:  # 继续训练
        dist_print('==> Resume model from ' + opt.resume)
        resume_dict = torch.load(opt.resume, map_location='cpu')
        net.load_state_dict(resume_dict['model'])
        if 'optimizer' in resume_dict.keys():
            optimizer.load_state_dict(resume_dict['optimizer'])
        resume_epoch = int(os.path.split(opt.resume)[1][2:5]) + 1
    else:
        resume_epoch = 0

    scheduler = get_scheduler(optimizer, opt, len(train_loader))
    dist_print(len(train_loader))
    metric_dict = get_metric_dict(opt)
    loss_dict = get_loss_dict(opt)
    logger = get_logger(work_dir, opt)
    cp_projects(opt.auto_backup, work_dir)
    # 开始训练
    for epoch in range(resume_epoch, opt.epoch):
        # 传入参数： 网络，训练数据集，损失字典，优化器，调度器，
        print(f'epoch:{epoch}:')
        train(net, train_loader, loss_dict, optimizer, scheduler, logger, epoch, metric_dict, opt.use_aux)  # 训练循环
        if(epoch % 20 == 0):
            save_model(net, optimizer, epoch, work_dir, distributed)  # 模型保存
    logger.close()
