"""
This file implements the training process and all the summaries
"""
import os
import numpy as np
import cv2
import torch
from torch.nn.functional import pixel_shuffle, softmax
from torch.utils.data import DataLoader
import torch.utils.data.dataloader as torch_loader
from tensorboardX import SummaryWriter
import torch.optim.lr_scheduler as lr_scheduler

from dataset.dataset_util import get_dataset
from model.model_util import get_model
from model.loss import TotalLoss, get_loss_and_weights
from model.metrics import AverageMeter, Metrics, super_nms
from model.lr_scheduler import get_lr_scheduler
from misc.train_utils import (convert_image, get_latest_checkpoint,
                              remove_old_checkpoints)

from multi_train_utils.distributed_utils import init_distributed_mode, dist, cleanup,reduce_value,is_main_process
import tempfile
from tqdm import tqdm
import sys


def customized_collate_fn(batch):
    """ Customized collate_fn.

    Stacks fixed-shape per-sample entries into batched tensors with the
    default collate, while keeping variable-sized entries ("junctions",
    "line_map") as plain python lists.
    """
    # Keys whose values are stacked into batched tensors.
    tensor_keys = ["image", "junction_map", "heatmap", "angle",
                   "valid_mask", "train_mask", "angle_mask"]
    # Keys kept as per-sample lists (variable-sized, cannot be stacked).
    list_keys = ["junctions", "line_map"]

    collated = {key: torch_loader.default_collate([sample[key] for sample in batch])
                for key in tensor_keys}
    collated.update({key: [sample[key] for sample in batch] for key in list_keys})
    return collated


def restore_weights(model, state_dict, strict=True):
    """ Restore weights in compatible mode.

    First attempts a direct ``load_state_dict``. If that fails (typically a
    key-name mismatch between checkpoint and model versions), falls back to a
    non-strict load, then pairs the remaining missing model keys with the
    checkpoint's unexpected keys positionally.

    Args:
        model: torch.nn.Module to restore into (modified in place).
        state_dict: checkpoint state dict to load.
        strict: forwarded to the first, direct load attempt.

    Returns:
        The same ``model`` instance with weights restored.
    """
    # Try to directly load the state dict.
    try:
        model.load_state_dict(state_dict, strict=strict)
    # Version-compatibility fallback. Fix: catch only real errors instead of
    # a bare ``except:`` (which would also swallow KeyboardInterrupt/SystemExit).
    except Exception:
        err = model.load_state_dict(state_dict, strict=False)

        # Missing keys: present in the model but not in the checkpoint.
        missing_keys = err.missing_keys
        # Unexpected keys: present in the checkpoint but not in the model.
        unexpected_keys = err.unexpected_keys

        # Drop BatchNorm bookkeeping entries ("num_batches_tracked") once,
        # outside the loop (the original rebuilt this list per iteration).
        dict_keys = [k for k in unexpected_keys if "tracked" not in k]

        # Map mismatched keys positionally. NOTE(review): this assumes the
        # filtered unexpected keys line up one-to-one and in order with the
        # missing keys — confirm when loading checkpoints from new models.
        model_dict = model.state_dict()
        for idx, key in enumerate(missing_keys):
            model_dict[key] = state_dict[dict_keys[idx]]
        model.load_state_dict(model_dict)

    return model


def train_net(args, dataset_cfg, model_cfg, output_path, device):
    """ Main training function (distributed).

    Initializes the DDP process group, builds datasets / dataloaders with
    distributed samplers, constructs (or resumes) the model, optimizer and
    optional LR scheduler, then runs the epoch loop of training, validation
    and checkpointing. Only rank 0 prints progress and saves checkpoints.

    Args:
        args: namespace with distributed settings (rank, world_size, gpu,
            syncBN) and resume/pretrained options.
        dataset_cfg: dataset configuration dict.
        model_cfg: model configuration dict (train/test sections, learning
            rate, epochs, weighting policy, ...).
        output_path: directory for tensorboard logs and checkpoints.
        device: device of this process (used for map_location and sync).
    """
    # Version compatibility: default the weighting policy to "static".
    if model_cfg.get("weighting_policy") is None:
        model_cfg["weighting_policy"] = "static"

    # Get the train and test configs.
    train_cfg = model_cfg["train"]
    test_cfg = model_cfg["test"]

    # Initialize the distributed environment for this process.
    init_distributed_mode(args=args)

    rank = args.rank

    if rank == 0:  # Only the first process prints info.
        print(args)
        print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')

    if rank == 0:
        # Create train and test datasets.
        print("\t Initializing dataset...")
    train_dataset, train_collate_fn = get_dataset("train", dataset_cfg)
    test_dataset, test_collate_fn = get_dataset("test", dataset_cfg)

    # Assign each rank its own subset of sample indices.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)

    # Group the sampled indices into batches of batch_size.
    train_batch_sampler = torch.utils.data.BatchSampler(
        train_sampler, train_cfg["batch_size"], drop_last=True)

    # Number of dataloader workers per process.
    nw = min([os.cpu_count(), train_cfg["batch_size"] if train_cfg["batch_size"] > 1 else 0, 8])
    if rank == 0:
        print('Using {} dataloader workers every process'.format(nw))

    # Create the dataloaders.
    train_loader = DataLoader(train_dataset,
                              batch_sampler=train_batch_sampler,
                              num_workers=nw,
                              pin_memory=True,
                              collate_fn=train_collate_fn)
    test_loader = DataLoader(test_dataset,
                             batch_size=test_cfg.get("batch_size", 1),
                             sampler=test_sampler,
                             pin_memory=True,
                             num_workers=test_cfg.get("num_workers", 1),
                             collate_fn=test_collate_fn)
    if rank == 0:
        print("\t Successfully intialized dataloaders.")

    # Get the loss functions and weights first (synthetic training does not
    # use the descriptor loss).
    loss_funcs, loss_weights = get_loss_and_weights(model_cfg)

    # Temporary initial-weights file; only assigned when training from
    # scratch without pretrained weights. Kept as None otherwise so the
    # cleanup below can tell whether there is anything to remove.
    checkpoint_path = None

    # Resume from a previous checkpoint.
    if args.resume:
        # Create the model and load its state dict.
        checkpoint = get_latest_checkpoint(args.resume_path,
                                           args.checkpoint_name)
        model = get_model(model_cfg, loss_weights)
        model = restore_weights(model, checkpoint["model_state_dict"])
        model = model.cuda()

        # Learning rate is scaled by the number of parallel GPUs.
        optimizer = torch.optim.Adam(
            [{"params": model.parameters(),
              "initial_lr": model_cfg["learning_rate"] * args.world_size}],
            model_cfg["learning_rate"] * args.world_size,
            amsgrad=True)
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])

        # Optionally get the learning rate scheduler (None for synthetic
        # dataset training).
        scheduler = get_lr_scheduler(
            lr_decay=model_cfg.get("lr_decay", False),
            lr_decay_cfg=model_cfg.get("lr_decay_cfg", None),
            optimizer=optimizer)
        # If we start to use the scheduler from the middle of training.
        if ((scheduler is not None)
                and (checkpoint.get("scheduler_state_dict", None) is not None)):
            scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
        start_epoch = checkpoint["epoch"] + 1
    # Train from scratch.
    else:
        # Create the model.
        model = get_model(model_cfg, loss_weights)
        # Optionally load pretrained weights.
        if args.pretrained:
            print("\t [Debug] Loading pretrained weights...")
            checkpoint = get_latest_checkpoint(args.pretrained_path,
                                               args.checkpoint_name)
            # Non-strict so auto-weighting can restore from non-auto-weighting.
            model = restore_weights(model, checkpoint["model_state_dict"],
                                    strict=False)
            print("\t [Debug] Finished loading pretrained weights!")
        else:
            checkpoint_path = os.path.join(tempfile.gettempdir(), "initial_weights.pt")
            # Without pretrained weights, save the weights of the first
            # process and load them in every other process so that all
            # processes start from identical initial weights.
            if rank == 0:
                torch.save(model.state_dict(), checkpoint_path)

            dist.barrier()
            # map_location must be given, otherwise the first GPU would be
            # charged with the extra memory of every process's load.
            model.load_state_dict(torch.load(checkpoint_path, map_location=device))
        model = model.cuda()
        # Learning rate is scaled by the number of parallel GPUs.
        optimizer = torch.optim.Adam(
            [{"params": model.parameters(),
              "initial_lr": model_cfg["learning_rate"] * args.world_size}],
            model_cfg["learning_rate"] * args.world_size,
            amsgrad=True)
        # Optionally get the learning rate scheduler (None for synthetic
        # dataset training).
        scheduler = get_lr_scheduler(
            lr_decay=model_cfg.get("lr_decay", False),
            lr_decay_cfg=model_cfg.get("lr_decay_cfg", None),
            optimizer=optimizer)
        start_epoch = 0

    if rank == 0:
        print("\t Successfully initialized model")

    # Optionally convert to synchronized BatchNorm. Only meaningful for
    # networks that contain BN layers; training becomes slower with SyncBN.
    if args.syncBN:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)

    # Wrap the model with DistributedDataParallel.
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])

    # Define the total loss.
    policy = model_cfg.get("weighting_policy", "static")
    loss_func = TotalLoss(loss_funcs, loss_weights, policy).cuda()
    if "descriptor_decoder" in model_cfg:
        metric_func = Metrics(model_cfg["detection_thresh"],
                              model_cfg["prob_thresh"],
                              model_cfg["descriptor_loss_cfg"]["grid_size"],
                              desc_metric_lst='all')
    else:
        metric_func = Metrics(model_cfg["detection_thresh"],
                              model_cfg["prob_thresh"],
                              model_cfg["grid_size"])

    # Define the summary writer. Only rank 0 writes the real tensorboard
    # files under "log"; other ranks write to a throwaway "log1" directory.
    if rank == 0:
        logdir = os.path.join(output_path, "log")
        writer = SummaryWriter(logdir=logdir)
    else:
        logdir = os.path.join(output_path, "log1")
        writer = SummaryWriter(logdir=logdir)

    # Start the training loop.
    for epoch in range(start_epoch, model_cfg["epochs"]):
        # Reshuffle the distributed sampler for this epoch.
        train_sampler.set_epoch(epoch)

        # Record the learning rate.
        current_lr = optimizer.state_dict()["param_groups"][0]["lr"]
        if rank == 0:
            writer.add_scalar("LR/lr", current_lr, epoch)

            # Train for one epoch.
            print("\n\n================== Training ====================")
        train_single_epoch(
            model=model,
            model_cfg=model_cfg,
            optimizer=optimizer,
            loss_func=loss_func,
            metric_func=metric_func,
            train_loader=train_loader,
            writer=writer,
            device=device,
            epoch=epoch)

        # Update the scheduler.
        if scheduler is not None:
            scheduler.step()

        if is_main_process():
            # Do the validation.
            print("\n\n================== Validation ==================")
        validate(
            model=model,
            model_cfg=model_cfg,
            loss_func=loss_func,
            metric_func=metric_func,
            val_loader=test_loader,
            writer=writer,
            device=device,
            epoch=epoch)

        if rank == 0:
            # Save checkpoint as
            # <output_path>/checkpoint-epoch(epoch_num)-end.tar
            file_name = os.path.join(output_path,
                                     "checkpoint-epoch%03d-end.tar" % (epoch))
            print("[Info] Saving checkpoint %s ..." % file_name)
            save_dict = {
                "epoch": epoch,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "model_cfg": model_cfg}
            if scheduler is not None:
                save_dict.update({"scheduler_state_dict": scheduler.state_dict()})
            torch.save(save_dict, file_name)

            # Remove the outdated checkpoints.
            remove_old_checkpoints(output_path, model_cfg.get("max_ckpt", 15))

    # Remove the temporary initial-weights file. Bug fix: checkpoint_path is
    # only assigned when training from scratch without pretrained weights;
    # previously this line raised NameError for resume/pretrained runs.
    if rank == 0 and checkpoint_path is not None and os.path.exists(checkpoint_path):
        os.remove(checkpoint_path)

    cleanup()

def train_single_epoch(model, model_cfg, optimizer, loss_func, metric_func,
                       train_loader, writer, device,epoch):
    """ Train for one epoch.

    Runs the forward/backward pass over ``train_loader``, averages the total
    loss across DDP processes with ``reduce_value``, and — on the main
    process only — evaluates metrics, prints progress every ``disp_freq``
    iterations and records tensorboard summaries every ``summary_freq``
    iterations.

    Args:
        model: the (DDP-wrapped) network being trained.
        model_cfg: model config dict (disp_freq, summary_freq, grid_size,
            detection_thresh, epochs, ...).
        optimizer: optimizer stepped once per batch.
        loss_func: TotalLoss instance; its ``compute_descriptors`` flag
            selects the two-view descriptor training path.
        metric_func: Metrics instance for precision/recall evaluation.
        train_loader: training dataloader (wrapped in tqdm on rank 0).
        writer: tensorboardX SummaryWriter.
        device: this process's device, used for the final synchronization.
        epoch: current epoch index (used for the global step and logging).
    """
    # Switch the model to training mode (nn.Module method).
    model.train()
    optimizer.zero_grad()

    # Initialize the average meter.
    compute_descriptors = loss_func.compute_descriptors
    # Whether the descriptor loss and its weight are being trained.
    if compute_descriptors:
        average_meter = AverageMeter(is_training=True, desc_metric_lst='all')
    else:
        # AverageMeter without descriptor metrics.
        average_meter = AverageMeter(is_training=True)

    # Show a progress bar only in process 0.
    if is_main_process():
        train_loader = tqdm(train_loader, file=sys.stdout)

    # The training loop
    for idx, data in enumerate(train_loader):
        if compute_descriptors:
            # Two-view (reference/target) batch for descriptor training.
            junc_map = data["ref_junction_map"].cuda()
            junc_map2 = data["target_junction_map"].cuda()
            heatmap = data["ref_heatmap"].cuda()
            heatmap2 = data["target_heatmap"].cuda()
            line_points = data["ref_line_points"].cuda()
            line_points2 = data["target_line_points"].cuda()
            line_indices = data["ref_line_indices"].cuda()
            valid_mask = data["ref_valid_mask"].cuda()
            valid_mask2 = data["target_valid_mask"].cuda()
            input_images = data["ref_image"].cuda()
            input_images2 = data["target_image"].cuda()
            angle = data["ref_angle"].cuda()
            angle2 = data["target_angle"].cuda()
            train_mask = data["ref_train_mask"].cuda()
            train_mask2 = data["target_train_mask"].cuda()
            angle_mask = data["ref_angle_mask"].cuda()
            angle_mask2 = data["target_angle_mask"].cuda()

            # Run the forward pass on both views.
            outputs = model(input_images)
            outputs2 = model(input_images2)

            # Compute losses
            losses = loss_func.forward_descriptors(
                outputs["junctions"], outputs2["junctions"],
                junc_map, junc_map2, outputs["heatmap"], outputs2["heatmap"],
                heatmap, heatmap2, line_points, line_points2,
                line_indices, outputs['descriptors'], outputs2['descriptors'],
                outputs["angle"], outputs2["angle"], angle, angle2,
                epoch, valid_mask, valid_mask2, train_mask, train_mask2, angle_mask, angle_mask2)
        else:
            # Single-view batch (detector-only training).
            junc_map = data["junction_map"].cuda()
            heatmap = data["heatmap"].cuda()
            angle = data["angle"].cuda()
            valid_mask = data["valid_mask"].cuda()
            input_images = data["image"].cuda()

            # Run the forward pass
            outputs = model(input_images)

            # Compute losses
            losses = loss_func(
                outputs["junctions"], junc_map,
                outputs["heatmap"], heatmap,
                outputs["angle"], angle,
                valid_mask)

        # Scalar total loss for this batch.
        total_loss = losses["total_loss"]

        # Update the model
        total_loss.backward()  # Backpropagate to compute gradients.
        # Average the loss across all GPUs (for logging).
        total_loss=reduce_value(total_loss,average=True)
        # Print total_loss only in process 0.
        if is_main_process():
            train_loader.desc = "[epoch {}] total loss {}".format(epoch, round(total_loss.item(), 3))
        # for i in losses.keys():
        #     losses[i]=reduce_value(torch.as_tensor(losses[i]),average=True)
        optimizer.step()  # Update all parameters.
        optimizer.zero_grad()

        # Compute the global step (total number of iterations so far).
        global_step = epoch * len(train_loader) + idx
        ############## Measure the metric error #########################

        # Metric evaluation / logging happen only in process 0.
        if is_main_process():
            # Only do this when needed
            if (((idx % model_cfg["disp_freq"]) == 0)
                    or ((idx % model_cfg["summary_freq"]) == 0)):
                # {"junc_pred", "junc_pred_nms", "junc_prob"}: raw junction
                # map, NMS-ed junction map, and per-cell confidence map.
                junc_np = convert_junc_predictions(
                    outputs["junctions"], model_cfg["grid_size"],
                    model_cfg["detection_thresh"], 300)
                junc_map_np = junc_map.cpu().numpy().transpose(0, 2, 3, 1)

                # Always fetch only one channel (compatible with L1, L2, and CE)
                if outputs["heatmap"].shape[1] == 2:
                    heatmap_np = softmax(outputs["heatmap"].detach(),
                                         dim=1).cpu().numpy()
                    heatmap_np = heatmap_np.transpose(0, 2, 3, 1)[:, :, :, 1:]
                else:
                    heatmap_np = torch.sigmoid(outputs["heatmap"].detach())
                    heatmap_np = heatmap_np.cpu().numpy().transpose(0, 2, 3, 1)
                angle_np = outputs["angle"].detach()
                angle_np = angle_np.cpu().numpy().transpose(0, 2, 3, 1)

                heatmap_gt_np = heatmap.cpu().numpy().transpose(0, 2, 3, 1)
                angle_gt_np = angle.cpu().numpy().transpose(0,2, 3, 1)
                valid_mask_np = valid_mask.cpu().numpy().transpose(0, 2, 3, 1)

                # Evaluate metric results
                if compute_descriptors:
                    metric_func.evaluate(
                        junc_np["junc_pred"], junc_np["junc_pred_nms"],
                        junc_map_np, heatmap_np, heatmap_gt_np, valid_mask_np,
                        line_points, line_points2, outputs["descriptors"],
                        outputs2["descriptors"], line_indices)
                else:
                    metric_func.evaluate(
                        junc_np["junc_pred"], junc_np["junc_pred_nms"],
                        junc_map_np, heatmap_np, heatmap_gt_np, valid_mask_np)
                # Update average meter
                junc_loss = losses["junc_loss"].item()
                heatmap_loss = losses["heatmap_loss"].item()
                Angleloss = losses["Angleloss"].item()
                loss_dict = {
                    "junc_loss": junc_loss,
                    "heatmap_loss": heatmap_loss,
                    "Angleloss": Angleloss,
                    "total_loss": total_loss.item()}
                if compute_descriptors:
                    descriptor_loss = losses["descriptor_loss"].item()
                    loss_dict["descriptor_loss"] = losses["descriptor_loss"].item()

                average_meter.update(metric_func, loss_dict, num_samples=junc_map.shape[0])

            # Display the progress
            if (idx % model_cfg["disp_freq"]) == 0:
                results = metric_func.metric_results
                average = average_meter.average()
                # Get gpu memory usage in GB
                gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3)
                if compute_descriptors:
                    print(
                        "Epoch [%d / %d] Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), Angleloss=%.4f (%.4f), descriptor_loss=%.4f (%.4f), gpu_mem=%.4fGB"
                        % (epoch, model_cfg["epochs"], idx, len(train_loader),
                           total_loss.item(), average["total_loss"], junc_loss,
                           average["junc_loss"], heatmap_loss,
                           average["heatmap_loss"], Angleloss,
                           average["Angleloss"],descriptor_loss,
                           average["descriptor_loss"], gpu_mem_usage))
                else:
                    print(
                        "Epoch [%d / %d] Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), Angleloss=%.4f (%.4f), reg_loss=%.4f,w_junc=%.4f,w_heatmap=%.4f,w_angle=%.4f,gpu_mem=%.4fGB"
                        % (epoch, model_cfg["epochs"], idx, len(train_loader),
                           total_loss.item(), average["total_loss"],
                           junc_loss, average["junc_loss"], heatmap_loss,
                           average["heatmap_loss"], Angleloss,
                           average["Angleloss"],losses["reg_loss"].item(),losses["w_junc"],losses["w_heatmap"],losses["w_angle"],gpu_mem_usage))
                print("\t Junction     precision=%.4f (%.4f) / recall=%.4f (%.4f)"
                      % (results["junc_precision"], average["junc_precision"],
                         results["junc_recall"], average["junc_recall"]))
                print("\t Junction nms precision=%.4f (%.4f) / recall=%.4f (%.4f)"
                      % (results["junc_precision_nms"],
                         average["junc_precision_nms"],
                         results["junc_recall_nms"], average["junc_recall_nms"]))
                print("\t Heatmap      precision=%.4f (%.4f) / recall=%.4f (%.4f)"
                      % (results["heatmap_precision"],
                         average["heatmap_precision"],
                         results["heatmap_recall"], average["heatmap_recall"]))
                print("\t Pro_background = %.4f" % losses["Pro_background"].item())
                print("\t Pro_target = %.4f" % losses["Pro_target"].item())
                print("\t CrossEntropyloss = %.4f" % losses["CrossEntropyloss"].item())
                print("\t heatmap_max = %.4f" % np.max(heatmap_np))
                if compute_descriptors:
                    print("\t Descriptors  matching score=%.4f (%.4f)"
                          % (results["matching_score"], average["matching_score"]))

            # Record summaries
            if (idx % model_cfg["summary_freq"]) == 0:
                results = metric_func.metric_results
                average = average_meter.average()
                # Add the shared losses
                scalar_summaries = {
                    "junc_loss": junc_loss,
                    "heatmap_loss": heatmap_loss,
                    "Angleloss": Angleloss,
                    "CrossEntropyloss": losses["CrossEntropyloss"].item(),
                    "Pro_background": losses["Pro_background"].item(),
                    "Pro_target": losses["Pro_target"].item(),
                    "total_loss": total_loss.detach().cpu().numpy(),
                    "metrics": results,
                    "average": average}
                # Add descriptor terms
                if compute_descriptors:
                    scalar_summaries["descriptor_loss"] = descriptor_loss
                    scalar_summaries["w_desc"] = losses["w_desc"]

                # Add weighting terms (even for static terms)
                scalar_summaries["w_junc"] = losses["w_junc"]
                scalar_summaries["w_heatmap"] = losses["w_heatmap"]
                scalar_summaries["w_angle"] = losses["w_angle"]
                scalar_summaries["reg_loss"] = losses["reg_loss"].item()

                # Only the first few images of the batch are visualized.
                num_images = 3
                junc_pred_binary = (junc_np["junc_pred"][:num_images, ...]
                                    > model_cfg["detection_thresh"])
                junc_pred_nms_binary = (junc_np["junc_pred_nms"][:num_images, ...]
                                        > model_cfg["detection_thresh"])
                image_summaries = {
                    "image": input_images.cpu().numpy()[:num_images, ...],
                    "valid_mask": valid_mask_np[:num_images, ...],
                    "junc_map_pred": junc_pred_binary,
                    "junc_map_pred_nms": junc_pred_nms_binary,
                    "junc_map_gt": junc_map_np[:num_images, ...],
                    "junc_prob_map": junc_np["junc_prob"][:num_images, ...],
                    "heatmap_pred": heatmap_np[:num_images, ...],
                    "heatmap_gt": heatmap_gt_np[:num_images, ...],
                    "angle_pred": angle_np[:num_images, ...],
                    "angle_gt": angle_gt_np[:num_images, ...]}
                # Record the training summary
                record_train_summaries(
                    writer, global_step, scalars=scalar_summaries,
                    images=image_summaries)



    # Wait for all processes to finish their computation.
    if device != torch.device("cpu"):
        torch.cuda.synchronize(device)

@torch.no_grad()
def validate(model, model_cfg, loss_func, metric_func, val_loader, writer, device,epoch):
    """ Validation.

    Runs a no-grad evaluation pass over ``val_loader``, averages the total
    loss across DDP processes, and — on the main process only — evaluates
    metrics, prints progress every ``disp_freq`` iterations and records a
    per-epoch summary at the end.

    Args:
        model: the (DDP-wrapped) network to evaluate.
        model_cfg: model config dict (disp_freq, grid_size,
            detection_thresh, ...).
        loss_func: TotalLoss instance; ``compute_descriptors`` selects the
            two-view descriptor path.
        metric_func: Metrics instance for precision/recall evaluation.
        val_loader: validation dataloader (wrapped in tqdm on rank 0).
        writer: tensorboardX SummaryWriter.
        device: this process's device, used for the final synchronization.
        epoch: current epoch index (used as the summary step and passed to
            the descriptor loss).
    """
    # Switch the model to eval mode
    model.eval()

    # Initialize the average meter
    compute_descriptors = loss_func.compute_descriptors
    if compute_descriptors:
        average_meter = AverageMeter(is_training=True, desc_metric_lst='all')
    else:
        average_meter = AverageMeter(is_training=True)

    # Show a progress bar only in process 0.
    if is_main_process():
        val_loader = tqdm(val_loader, file=sys.stdout)

    #idx1=0
    # The validation loop
    for idx, data in enumerate(val_loader):
        if compute_descriptors:
            # Two-view (reference/target) batch for descriptor evaluation.
            junc_map = data["ref_junction_map"].cuda()
            junc_map2 = data["target_junction_map"].cuda()
            heatmap = data["ref_heatmap"].cuda()
            heatmap2 = data["target_heatmap"].cuda()
            line_points = data["ref_line_points"].cuda()
            line_points2 = data["target_line_points"].cuda()
            line_indices = data["ref_line_indices"].cuda()
            valid_mask = data["ref_valid_mask"].cuda()
            valid_mask2 = data["target_valid_mask"].cuda()
            input_images = data["ref_image"].cuda()
            input_images2 = data["target_image"].cuda()
            angle = data["ref_angle"].cuda()
            angle2 = data["target_angle"].cuda()
            train_mask = data["ref_train_mask"].cuda()
            train_mask2 = data["target_train_mask"].cuda()
            angle_mask = data["ref_angle_mask"].cuda()
            angle_mask2 = data["target_angle_mask"].cuda()

            # Run the forward pass
            with torch.no_grad():
                outputs = model(input_images)
                outputs2 = model(input_images2)

                # Compute losses
                losses = loss_func.forward_descriptors(
                    outputs["junctions"], outputs2["junctions"],
                    junc_map, junc_map2, outputs["heatmap"],
                    outputs2["heatmap"], heatmap, heatmap2, line_points,
                    line_points2, line_indices, outputs['descriptors'],
                    outputs2['descriptors'], outputs["angle"], outputs2["angle"], angle, angle2,
                    epoch, valid_mask, valid_mask2, train_mask, train_mask2, angle_mask, angle_mask2)
        else:
            # Single-view batch (detector-only evaluation).
            junc_map = data["junction_map"].cuda()
            heatmap = data["heatmap"].cuda()
            angle = data["angle"].cuda()
            valid_mask = data["valid_mask"].cuda()
            input_images = data["image"].cuda()

            # Run the forward pass
            with torch.no_grad():
                outputs = model(input_images)

                # Compute losses
                losses = loss_func(
                    outputs["junctions"], junc_map,
                    outputs["heatmap"], heatmap,
                    outputs["angle"], angle,
                    valid_mask)
        total_loss = losses["total_loss"]

        # Average the loss across all GPUs (for logging).
        total_loss=reduce_value(total_loss,average=True)

        ############## Measure the metric error #########################
        # Metric evaluation / logging happen only in process 0.
        if is_main_process():
            junc_np = convert_junc_predictions(
                outputs["junctions"], model_cfg["grid_size"],
                model_cfg["detection_thresh"], 300)
            junc_map_np = junc_map.cpu().numpy().transpose(0, 2, 3, 1)
            # Always fetch only one channel (compatible with L1, L2, and CE)
            if outputs["heatmap"].shape[1] == 2:
                heatmap_np = softmax(outputs["heatmap"].detach(),
                                     dim=1).cpu().numpy().transpose(0, 2, 3, 1)
                heatmap_np = heatmap_np[:, :, :, 1:]
            else:
                heatmap_np = torch.sigmoid(outputs["heatmap"].detach())
                heatmap_np = heatmap_np.cpu().numpy().transpose(0, 2, 3, 1)

            heatmap_gt_np = heatmap.cpu().numpy().transpose(0, 2, 3, 1)
            valid_mask_np = valid_mask.cpu().numpy().transpose(0, 2, 3, 1)

            # Evaluate metric results
            if compute_descriptors:
                metric_func.evaluate(
                    junc_np["junc_pred"], junc_np["junc_pred_nms"],
                    junc_map_np, heatmap_np, heatmap_gt_np, valid_mask_np,
                    line_points, line_points2, outputs["descriptors"],
                    outputs2["descriptors"], line_indices)
            else:
                metric_func.evaluate(
                    junc_np["junc_pred"], junc_np["junc_pred_nms"], junc_map_np,
                    heatmap_np, heatmap_gt_np, valid_mask_np)
            # Update average meter
            junc_loss = losses["junc_loss"].item()
            heatmap_loss = losses["heatmap_loss"].item()
            Angleloss = losses["Angleloss"].item()
            loss_dict = {
                "junc_loss": junc_loss,
                "heatmap_loss": heatmap_loss,
                "Angleloss": Angleloss,
                "total_loss": total_loss.item()}
            if compute_descriptors:
                descriptor_loss = losses["descriptor_loss"].item()
                loss_dict["descriptor_loss"] = descriptor_loss
            average_meter.update(metric_func, loss_dict, num_samples=junc_map.shape[0])

            # Display the progress
            if (idx % model_cfg["disp_freq"]) == 0:
                results = metric_func.metric_results
                average = average_meter.average()
                if compute_descriptors:
                    print(
                        "Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), Angleloss=%.4f (%.4f), descriptor_loss=%.4f (%.4f)"
                        % (idx, len(val_loader),
                           total_loss.item(), average["total_loss"],
                           junc_loss, average["junc_loss"],
                           heatmap_loss, average["heatmap_loss"],
                           Angleloss, average["Angleloss"],
                           descriptor_loss, average["descriptor_loss"]))
                else:
                    print("Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), Angleloss=%.4f (%.4f)"
                          % (idx, len(val_loader),
                             total_loss.item(), average["total_loss"],
                             junc_loss, average["junc_loss"],
                             heatmap_loss, average["heatmap_loss"],
                             Angleloss, average["Angleloss"]))
                print("\t Junction     precision=%.4f (%.4f) / recall=%.4f (%.4f)"
                      % (results["junc_precision"], average["junc_precision"],
                         results["junc_recall"], average["junc_recall"]))
                print("\t Junction nms precision=%.4f (%.4f) / recall=%.4f (%.4f)"
                      % (results["junc_precision_nms"],
                         average["junc_precision_nms"],
                         results["junc_recall_nms"], average["junc_recall_nms"]))
                print("\t Heatmap      precision=%.4f (%.4f) / recall=%.4f (%.4f)"
                      % (results["heatmap_precision"],
                         average["heatmap_precision"],
                         results["heatmap_recall"], average["heatmap_recall"]))
                if compute_descriptors:
                    print("\t Descriptors  matching score=%.4f (%.4f)"
                          % (results["matching_score"], average["matching_score"]))


    # Record the per-epoch summary in process 0.
    # NOTE(review): these scalars come from the LAST batch only, and would be
    # unbound if the loader were empty — assumes a non-empty val_loader.
    if is_main_process():
        # Record summaries
        average = average_meter.average()
        scalar_summaries = {
            "junc_loss": junc_loss,
            "heatmap_loss": heatmap_loss,
            "Angleloss": Angleloss,
            "total_loss": total_loss,
            "CrossEntropyloss": losses["CrossEntropyloss"].item(),
            "Pro_background": losses["Pro_background"].item(),
            "Pro_target": losses["Pro_target"].item(),
            "average": average}
        # Record the training summary
        record_test_summaries(writer, epoch, scalar_summaries)

    # Wait for all processes to finish their computation.
    if device != torch.device("cpu"):
        torch.cuda.synchronize(device)


def convert_junc_predictions(predictions, grid_size,
                             detect_thresh=1 / 65, topk=300):
    """ Convert torch predictions to numpy arrays for evaluation.

    Returns a dict with:
        "junc_pred":     full-resolution prediction map, batch x H x W.
        "junc_pred_nms": the same map after NMS (zeros at suppressed pixels).
        "junc_prob":     grid-resolution confidence map, batch x H/g x W/g,
                         i.e. the channel-sum excluding the "no junction" bin.
    """
    # Per-cell channel probabilities; detach so no gradients are tracked.
    probs = softmax(predictions.detach(), dim=1).cpu()

    # Grid-resolution confidence: sum over every channel except the last
    # ("no junction") one.
    prob_map = probs.numpy().transpose(0, 2, 3, 1)[:, :, :, :-1].sum(axis=-1)

    # Full-resolution map via pixel shuffle of the non-dustbin channels,
    # then batch x H x W x 1 layout.
    pred_map = pixel_shuffle(
        probs[:, :-1, :, :], grid_size).cpu().numpy().transpose(0, 2, 3, 1)

    # NMS keeps responses above `detect_thresh`, at most `topk` of them.
    pred_map_nms = super_nms(pred_map, grid_size, detect_thresh, topk)

    return {"junc_pred": pred_map.squeeze(-1),
            "junc_pred_nms": pred_map_nms,
            "junc_prob": prob_map}


def record_train_summaries(writer, global_step, scalars, images):
    """ Record training summaries. """
    results = scalars["metrics"]
    average = scalars["average"]

    # GPU memory usage in GB.
    writer.add_scalar("GPU/GPU_memory_usage",
                      torch.cuda.max_memory_allocated() / (1024 ** 3),
                      global_step)

    # Per-step losses.
    loss_keys = ["junc_loss", "heatmap_loss", "Angleloss", "total_loss",
                 "CrossEntropyloss", "Pro_background", "Pro_target"]
    for key in loss_keys:
        writer.add_scalar("Train_loss/" + key, scalars[key], global_step)
    # Optional regularization loss.
    if "reg_loss" in scalars:
        writer.add_scalar("Train_loss/reg_loss", scalars["reg_loss"],
                          global_step)
    # Optional descriptor loss (raw and smoothed).
    if "descriptor_loss" in scalars:
        writer.add_scalar("Train_loss/descriptor_loss",
                          scalars["descriptor_loss"], global_step)
        writer.add_scalar("Train_loss_average/descriptor_loss",
                          average["descriptor_loss"], global_step)

    # Loss weightings (keys containing "w_").
    for key in scalars:
        if "w_" in key:
            writer.add_scalar("Train_weight/" + key, scalars[key],
                              global_step)

    # Smoothed losses.
    for key in ["junc_loss", "heatmap_loss", "Angleloss", "total_loss"]:
        writer.add_scalar("Train_loss_average/" + key, average[key],
                          global_step)
    # Smoothed descriptor loss.
    if "descriptor_loss" in average:
        writer.add_scalar("Train_loss_average/descriptor_loss",
                          average["descriptor_loss"], global_step)

    # Per-step and smoothed detection metrics share the same key set.
    metric_keys = ["junc_precision", "junc_precision_nms", "junc_recall",
                   "junc_recall_nms", "heatmap_precision", "heatmap_recall"]
    for key in metric_keys:
        writer.add_scalar("Train_metrics/" + key, results[key], global_step)
    if "matching_score" in results:
        writer.add_scalar("Train_metrics/matching_score",
                          results["matching_score"], global_step)

    for key in metric_keys:
        writer.add_scalar("Train_metrics_average/" + key, average[key],
                          global_step)
    if "matching_score" in average:
        writer.add_scalar("Train_metrics_average/matching_score",
                          average["matching_score"], global_step)

    # Image summaries: input images and valid masks.
    image_tensor = convert_image(images["image"], 1)
    valid_masks = convert_image(images["valid_mask"], -1)
    writer.add_images("Train/images", image_tensor, global_step,
                      dataformats="NCHW")
    writer.add_images("Train/valid_map", valid_masks, global_step,
                      dataformats="NHWC")

    # Line heatmaps (ground truth and prediction).
    writer.add_images("Train/heatmap_gt",
                      convert_image(images["heatmap_gt"], -1), global_step,
                      dataformats="NHWC")
    writer.add_images("Train/heatmap_pred",
                      convert_image(images["heatmap_pred"], -1), global_step,
                      dataformats="NHWC")

    # Angle maps (logged as-is, without normalization).
    writer.add_images("Train/angle_gt", images["angle_gt"], global_step,
                      dataformats="NHWC")
    writer.add_images("Train/angle_pred", images["angle_pred"], global_step,
                      dataformats="NHWC")

    # Junction detections overlaid on the input images.
    junc_plots = plot_junction_detection(
        image_tensor, images["junc_map_pred"],
        images["junc_map_pred_nms"], images["junc_map_gt"])
    for key in ["junc_gt", "junc_pred", "junc_pred_nms"]:
        writer.add_images("Train/" + key, junc_plots[key + "_plot"] / 255.,
                          global_step, dataformats="NHWC")
    writer.add_images(
        "Train/junc_prob_map",
        convert_image(images["junc_prob_map"][..., None], axis=-1),
        global_step, dataformats="NHWC")


def record_test_summaries(writer, epoch, scalars):
    """ Record testing summaries. """
    average = scalars["average"]

    # Last-batch (non-averaged) losses.
    for key in ["junc_loss", "heatmap_loss", "Angleloss", "total_loss",
                "CrossEntropyloss", "Pro_background", "Pro_target"]:
        writer.add_scalar("Val_loss/" + key, scalars[key], epoch)

    # Averaged losses over the validation epoch.
    for key in ["junc_loss", "heatmap_loss", "Angleloss", "total_loss"]:
        writer.add_scalar("Val_loss_Average/" + key, average[key], epoch)
    # Optional averaged descriptor loss.
    if "descriptor_loss" in average:
        writer.add_scalar("Val_loss_Average/descriptor_loss",
                          average["descriptor_loss"], epoch)

    # Averaged detection metrics.
    for key in ["junc_precision", "junc_precision_nms", "junc_recall",
                "junc_recall_nms", "heatmap_precision", "heatmap_recall"]:
        writer.add_scalar("Val_metrics/" + key, average[key], epoch)
    # Optional averaged descriptor matching score.
    if "matching_score" in average:
        writer.add_scalar("Val_metrics/matching_score",
                          average["matching_score"], epoch)


def plot_junction_detection(image_tensor, junc_pred_tensor,
                            junc_pred_nms_tensor, junc_gt_tensor):
    """ Plot the junction points on images.

    Args:
        image_tensor: batch of images, NCHW, float values in [0, 1].
        junc_pred_tensor: per-image predicted junction maps; pixels > 0
            are treated as detections.
        junc_pred_nms_tensor: predicted junction maps after NMS.
        junc_gt_tensor: ground-truth junction maps (may carry a trailing
            singleton channel; it is squeezed away before thresholding).

    Returns:
        Dict with "junc_gt_plot", "junc_pred_plot" and "junc_pred_nms_plot",
        each an NHWC uint8 array of the images with junctions drawn on top.
    """
    batch_size = image_tensor.shape[0]

    junc_pred_lst = []
    junc_pred_nms_lst = []
    junc_gt_lst = []
    for i in range(batch_size):
        # Convert the CHW float image in [0, 1] to an HWC uint8 image.
        image = (image_tensor[i, :, :, :]
                 * 255.).astype(np.uint8).transpose(1, 2, 0)

        # Ground truth in red, predictions (raw and NMS) in green.
        junc_gt_lst.append(_draw_junctions(
            image, junc_gt_tensor[i, ...].squeeze(), (255, 0, 0)))
        junc_pred_lst.append(_draw_junctions(
            image, junc_pred_tensor[i, ...], (0, 255, 0)))
        junc_pred_nms_lst.append(_draw_junctions(
            image, junc_pred_nms_tensor[i, ...], (0, 255, 0)))

    return {"junc_gt_plot": np.concatenate(junc_gt_lst, axis=0),
            "junc_pred_plot": np.concatenate(junc_pred_lst, axis=0),
            "junc_pred_nms_plot": np.concatenate(junc_pred_nms_lst, axis=0)}


def _draw_junctions(image, junc_map, color):
    """ Draw a circle at every positive pixel of junc_map on a copy of image.

    Returns the annotated image with a leading batch axis (1 x H x W x C).
    """
    rows, cols = np.where(junc_map > 0)
    plot = image.copy()
    for row, col in zip(rows, cols):
        # cv2 expects points as (x, y) = (col, row) with plain Python ints.
        cv2.circle(plot, (int(col), int(row)), 3, color=color, thickness=2)
    return plot[None, ...]
