"""
Loss function implementations.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import  pixel_shuffle, softmax
from kornia.geometry import warp_perspective

from misc.geometry_utils import (keypoints_to_grid, get_dist_mask,
                                 get_common_line_mask)

from scipy.ndimage import distance_transform_edt


# loss_weight keys: "w_junc", "w_heatmap", "w_desc"
# loss_func keys: "junc_loss", "heatmap_loss", "descriptor_loss"
def get_loss_and_weights(model_cfg, device=torch.device("cuda")):
    """ Get loss functions and either static or dynamic weighting.

    Args:
        model_cfg: Config dict holding loss names, weights and policies.
        device: Device the loss modules (and class weights) are moved to.

    Returns:
        (loss_func, loss_weight): loss_func maps loss names to nn.Module
        instances; loss_weight maps weight names to tensors (static policy)
        or nn.Parameter (dynamic policy).
    """
    # Global weighting policy; per-loss configs may override it.
    w_policy = model_cfg.get("weighting_policy", "static")
    if w_policy not in ["static", "dynamic"]:
        raise ValueError("[Error] Not supported weighting policy.")

    loss_func = {}
    loss_weight = {}

    # Junction loss function (a JunctionDetectionLoss-family module) and weight
    w_junc, junc_loss_func = get_junction_loss_and_weight(model_cfg, w_policy)
    loss_func["junc_loss"] = junc_loss_func.to(device)
    loss_weight["w_junc"] = w_junc

    # Heatmap loss function and weight
    w_heatmap, heatmap_loss_func = get_heatmap_loss_and_weight(
        model_cfg, w_policy, device)
    loss_func["heatmap_loss"] = heatmap_loss_func.to(device)
    loss_weight["w_heatmap"] = w_heatmap

    # Angle loss function and weight
    w_angle, angle_loss_func = get_angle_loss_and_weight(model_cfg, w_policy)
    loss_func["angle_loss"] = angle_loss_func.to(device)
    loss_weight["w_angle"] = w_angle

    # [Optionally] get descriptor loss function and weight
    if model_cfg.get("descriptor_loss_func", None) is not None:
        w_descriptor, descriptor_loss_func = get_descriptor_loss_and_weight(
            model_cfg, w_policy)
        loss_func["descriptor_loss"] = descriptor_loss_func.to(device)
        loss_weight["w_desc"] = w_descriptor

    return loss_func, loss_weight


def get_junction_loss_and_weight(model_cfg, global_w_policy):
    """ Get the junction loss function and weight. """
    cfg = model_cfg.get("junction_loss_cfg", {})

    # Per-loss policy; falls back to the global weighting policy.
    policy = cfg.get("policy", global_w_policy)
    if policy == "static":
        w_junc = torch.tensor(model_cfg["w_junc"], dtype=torch.float32)
    elif policy == "dynamic":
        # Trainable weight: registered with the module as an nn.Parameter
        # so the optimizer can update it.
        w_junc = nn.Parameter(
            torch.tensor(model_cfg["w_junc"], dtype=torch.float32),
            requires_grad=True)
    else:
        raise ValueError(
            "[Error] Unknown weighting policy for junction loss weight.")

    # Only the SuperPoint-style junction loss is supported.
    if model_cfg.get("junction_loss_func", "superpoint") != "superpoint":
        raise ValueError("[Error] Not supported junction loss function.")

    # "single" -> plain detection loss; anything else -> repeatability variant.
    if model_cfg.get("return_type", "single") == "single":
        junc_loss_func = JunctionDetectionLoss(model_cfg["grid_size"],
                                               model_cfg["keep_border_valid"])
    else:
        junc_loss_func = RepJunctionDetectionLoss(model_cfg["grid_size"],
                                                  model_cfg["keep_border_valid"])

    return w_junc, junc_loss_func


def get_heatmap_loss_and_weight(model_cfg, global_w_policy, device):
    """ Get the heatmap loss function and weight.

    Args:
        model_cfg: Config dict.
        global_w_policy: Fallback weighting policy ("static" or "dynamic").
        device: Device the (static) per-class weight tensor is placed on.

    Returns:
        (w_heatmap, heatmap_loss_func).
    """
    heatmap_loss_cfg = model_cfg.get("heatmap_loss_cfg", {})

    # Get the heatmap loss weight (per-loss policy overrides the global one)
    w_policy = heatmap_loss_cfg.get("policy", global_w_policy)
    if w_policy == "static":
        w_heatmap = torch.tensor(model_cfg["w_heatmap"], dtype=torch.float32)
    elif w_policy == "dynamic":
        # Trainable weight registered as an nn.Parameter.
        w_heatmap = nn.Parameter(
            torch.tensor(model_cfg["w_heatmap"], dtype=torch.float32),
            requires_grad=True)
    else:
        # Fixed: the message previously referred to the junction loss.
        raise ValueError(
            "[Error] Unknown weighting policy for heatmap loss weight.")

    # Get the corresponding heatmap loss based on the config
    heatmap_loss_name = model_cfg.get("heatmap_loss_func", "cross_entropy")
    loss_type = model_cfg.get("return_type", "single")
    if heatmap_loss_name == "cross_entropy":
        # Heatmap class weight (always static): [background, foreground].
        heatmap_class_w = model_cfg.get("w_heatmap_class", 1.)
        class_weight = torch.tensor(
            np.array([1., heatmap_class_w])).to(torch.float).to(device)
        # "single" -> plain heatmap loss; otherwise repeatability variant.
        if loss_type == "single":
            heatmap_loss_func = HeatmapLoss(class_weight=class_weight)
        else:
            heatmap_loss_func = RepHeatmapLoss(class_weight=class_weight)
    else:
        raise ValueError("[Error] Not supported heatmap loss function.")

    return w_heatmap, heatmap_loss_func

def get_angle_loss_and_weight(model_cfg, global_w_policy):
    """ Get the angle loss function and weight.

    Unlike the other losses, the angle loss has no per-loss policy override:
    the global weighting policy is used directly.
    """
    w_policy = global_w_policy
    if w_policy == "static":
        w_angle = torch.tensor(model_cfg["w_angle"], dtype=torch.float32)
    elif w_policy == "dynamic":
        # Trainable weight registered as an nn.Parameter.
        w_angle = nn.Parameter(
            torch.tensor(model_cfg["w_angle"], dtype=torch.float32),
            requires_grad=True)
    else:
        # Fixed: the message previously referred to the junction loss.
        raise ValueError(
            "[Error] Unknown weighting policy for angle loss weight.")

    # Get the angle loss function
    angle_loss_func = AngleLoss()

    return w_angle, angle_loss_func


def get_descriptor_loss_and_weight(model_cfg, global_w_policy):
    """ Get the descriptor loss function and weight. """
    desc_cfg = model_cfg.get("descriptor_loss_cfg", {})

    # Per-loss policy; falls back to the global weighting policy.
    policy = desc_cfg.get("policy", global_w_policy)
    if policy == "static":
        w_descriptor = torch.tensor(model_cfg["w_desc"], dtype=torch.float32)
    elif policy == "dynamic":
        # Trainable weight registered with the module as an nn.Parameter.
        w_descriptor = nn.Parameter(
            torch.tensor(model_cfg["w_desc"], dtype=torch.float32),
            requires_grad=True)
    else:
        raise ValueError(
            "[Error] Unknown weighting policy for descriptor loss weight.")

    # Only the regular-sampling triplet loss is supported.
    name = model_cfg.get("descriptor_loss_func", "regular_sampling")
    if name != "regular_sampling":
        raise ValueError("[Error] Not supported descriptor loss function.")
    descriptor_loss_func = TripletDescriptorLoss(
        desc_cfg["grid_size"],
        desc_cfg["dist_threshold"],
        desc_cfg["margin"])

    return w_descriptor, descriptor_loss_func


# Downsampling: spatial size shrinks by 1/grid_size, channels grow by grid_size**2.
def space_to_depth(input_tensor, grid_size):
    """ PixelUnshuffle for pytorch.

    Rearranges every (grid_size x grid_size) spatial block into the channel
    dimension: (N, C, H, W) -> (N, C*grid_size**2, H/grid_size, W/grid_size).
    Channel ordering is (row offset, col offset, original channel), i.e. the
    offsets vary slower than the channels — this differs from
    F.pixel_unshuffle, so the manual implementation is kept.
    """
    batch, channels, height, width = input_tensor.size()
    out_h = height // grid_size
    out_w = width // grid_size
    # Split both spatial axes into (cell index, offset within the cell).
    blocks = input_tensor.view(batch, channels, out_h, grid_size,
                               out_w, grid_size)
    # Move the two offset axes in front of the channel axis, then flatten
    # them into the channel dimension.
    blocks = blocks.permute(0, 3, 5, 1, 2, 4).contiguous()
    return blocks.view(batch, channels * grid_size * grid_size, out_h, out_w)

class RepeatabilityLoss(nn.Module):
    """
    Try to make the repeatability repeatable from one image to the other.

    Both input maps are compared over overlapping NxN windows: CosimLoss
    aligns corresponding windows, while PeakyLoss / MeanLoss encourage a
    peaked response inside each window.
    """
    def __init__(self, N=16, flag=0):
        # N: window size; windows overlap with stride N // 2.
        nn.Module.__init__(self)
        self.name = f'Rep{N}'
        # Sliding-window unfold: (B, C, H, W) -> (B, C*N*N, D) where D is
        # the number of window positions.
        self.patches = nn.Unfold(N, padding=0, stride=N // 2)
        # flag == 0: CosimLoss + PeakyLoss; flag == 1: CosimLoss + MeanLoss;
        # any other value: CosimLoss only.
        self.flag = flag

    def extract_patches(self, sal):
        # (B, C*N*N, D) -> (B, D, C*N*N): one flattened window per row.
        patches = self.patches(sal).transpose(1, 2)  # flatten
        return patches

    def CosimLoss(self, patches1, patches2):
        # 1 - mean cosine similarity between corresponding windows.
        # Inputs are 2D (M, C*N*N) after the selection done in forward(),
        # so dim=1 is the window-content dimension.
        patches_norm1 = F.normalize(patches1, p=2, dim=1)
        patches_norm2 = F.normalize(patches2, p=2, dim=1)
        cosim = (patches_norm1*patches_norm2).sum(dim=1)
        return 1 - cosim.mean()

    def PeakyLoss(self, patches1, patches2):
        # Encourage a single strong peak: reward (max - mean) per window.
        max_value1 = patches1.max(dim=1).values
        max_value2 = patches2.max(dim=1).values

        # Mean response of each window
        mean_value1 = patches1.mean(dim=1)
        mean_value2 = patches2.mean(dim=1)

        res1 = max_value1 - mean_value1
        res2 = max_value2 - mean_value2

        return ((1 - res1.mean()) + (1 - res2.mean())) / 2

    def MeanLoss(self, patches1, patches2):
        # Variant of PeakyLoss rewarding (mean - min) per window instead.
        min_value1 = patches1.min(dim=1).values
        min_value2 = patches2.min(dim=1).values

        # Mean response of each window
        mean_value1 = patches1.mean(dim=1)
        mean_value2 = patches2.mean(dim=1)

        res1 = mean_value1 - min_value1
        res2 = mean_value2 - min_value2

        return ((1 - res1.mean()) + (1 - res2.mean())) / 2

    def forward(self, sali1, sali2, mask):
        """ Compute the repeatability loss between two (aligned) maps.

        Returns (total, cosim_loss, peaky_loss). Windows whose mask is all
        zero are discarded; (0.5, 0, 0) is returned when none survives.
        """
        patches1 = self.extract_patches(sali1)
        patches2 = self.extract_patches(sali2)

        patch_mask = self.patches(mask.float()).transpose(1, 2)
        # Detect windows whose mask contains no valid pixel at all.
        all_zero_windows = (patch_mask.sum(dim=2) == 0)

        # (batch, window) index pairs of the windows that are kept
        non_zero_indices = torch.nonzero(~all_zero_windows, as_tuple=False)

        # Gather the kept windows: result is (M, C*N*N) with M windows.
        patches1 = patches1[non_zero_indices[:, 0], non_zero_indices[:, 1], :]
        patches2 = patches2[non_zero_indices[:, 0], non_zero_indices[:, 1], :]

        if patches1.numel() == 0 or patches2.numel() == 0:
            return 0.5, 0, 0

        cosim_loss = self.CosimLoss(patches1, patches2)
        if self.flag == 0:
            peaky_loss = self.PeakyLoss(patches1,patches2)
        elif self.flag == 1:
            peaky_loss = self.MeanLoss(patches1, patches2)
        else:
            peaky_loss = 0.

        return cosim_loss + peaky_loss, cosim_loss, peaky_loss


# Takes the ground-truth junction map (full resolution) and the junction predictions
# (1/grid_size resolution); returns a single torch.float loss over all batches/patches.
def junction_detection_loss(junction_map, junc_predictions, valid_mask=None,
                            grid_size=8, keep_border=True):
    """ Junction detection loss.

    Args:
        junction_map: (B, 1, H, W) ground-truth junction map — presumably
            binary (TODO confirm against the data loader).
        junc_predictions: (B, grid_size**2 + 1, H/grid_size, W/grid_size)
            logits; the last channel is the "dust bin" (no junction in cell).
        valid_mask: optional (B, 1, H, W) validity mask; None = all valid.
        grid_size: side length of the cells folded into channels.
        keep_border: if True a cell is valid when any of its pixels is
            valid; otherwise all of its pixels must be valid.

    Returns:
        Scalar torch.float loss averaged over the (masked) cells.
    """
    # Fold each grid_size x grid_size cell into channels:
    # (B, 1, H, W) -> (B, grid_size**2, H/g, W/g).
    junc_map = space_to_depth(junction_map, grid_size)
    map_shape = junc_map.shape[-2:]
    batch_size = junc_map.shape[0]
    # Dust-bin channel: label value 1 everywhere.
    dust_bin_label = torch.ones(
        [batch_size, 1, map_shape[0],
         map_shape[1]]).to(junc_map.device).to(torch.int)
    # Scale the junction entries by 2 so that any real junction dominates
    # the dust-bin value in the argmax below.
    junc_map = torch.cat([junc_map * 2, dust_bin_label], dim=1)
    # Per-cell class label = channel of the selected junction position.
    # The Uniform(0, 0.1) noise breaks ties randomly when a cell contains
    # several junctions.
    labels = torch.argmax(
        junc_map.to(torch.float) +
        torch.distributions.Uniform(0, 0.1).sample(junc_map.shape).to(junc_map.device),
        dim=1)

    # Also convert the valid mask to channel tensor
    valid_mask = (torch.ones(junction_map.shape) if valid_mask is None
                  else valid_mask)
    valid_mask = space_to_depth(valid_mask, grid_size)

    # Compute junction loss on the border patch or not
    if keep_border:
        # Cell valid when at least one of its pixels is valid.
        valid_mask = torch.sum(valid_mask.to(torch.bool).to(torch.int),
                               dim=1, keepdim=True) > 0
    else:
        # Cell valid only when every one of its pixels is valid.
        valid_mask = torch.sum(valid_mask.to(torch.bool).to(torch.int),
                               dim=1, keepdim=True) >= grid_size * grid_size

    # Per-cell classification loss (CrossEntropyLoss = logSoftmax + NLL).
    loss_func = nn.CrossEntropyLoss(reduction="none")
    # The loss still needs NCHW format; result is (B, H/g, W/g).
    loss = loss_func(input=junc_predictions,
                     target=labels.to(torch.long))

    # NOTE(review): this overwrites the mask computed above with all ones,
    # i.e. the valid mask is deliberately disabled below — confirm intended.
    valid_mask[:,:,:,:] = 1

    # Masked sum of the per-cell losses over batch and space...
    loss_ = torch.sum(loss * torch.squeeze(valid_mask.to(torch.float),
                                           dim=1), dim=[0, 1, 2])
    # ...normalized by the number of valid cells -> single scalar.
    loss_final = loss_ / torch.sum(torch.squeeze(valid_mask.to(torch.float),
                                                 dim=1))


    return loss_final


def angle_loss(angle_gt, angle_pred):
    """ Angle regression loss.

    Pixels with angle_gt < 0 are background: positive predictions there are
    penalized with a log barrier. Pixels with angle_gt >= 0 are foreground:
    the 180-degree-periodic angular distance (normalized by 90) is used.
    Each term is averaged over its own pixel count; the two averages are
    summed into a single scalar.

    Args:
        angle_gt: (B, 1, H, W) ground-truth angles in degrees; < 0 marks
            background.
        angle_pred: predicted angles, same shape.
    """
    # Fixed: the zero branch was previously torch.tensor(0.0).cuda(), which
    # crashed on CPU; allocate it on the inputs' device instead.
    zero = torch.zeros((), dtype=angle_pred.dtype, device=angle_pred.device)

    # Background: (pred - |pred|)/2 keeps only the negative part of the
    # prediction; its distance to the (negative) gt feeds a clamped -log.
    background_loss = torch.where(
        angle_gt < 0,
        -torch.log(torch.clamp(
            1. - (((angle_pred - angle_pred.abs()) / 2. - angle_gt).abs() / 10.),
            0.01, 0.99)),
        zero)
    # Foreground: 180-periodic angular error, normalized to [0, 1] by 90.
    target_loss = torch.where(
        angle_gt >= 0.,
        torch.minimum((angle_pred - angle_gt).abs(),
                      (180 - (angle_pred - angle_gt).abs()).abs()) / 90.,
        zero)

    # Per-class pixel counts; epsilon avoids division by zero.
    background_num = (angle_gt < 0).sum(dim=(0, 1, 2, 3)) + 1e-6
    target_num = (angle_gt >= 0).sum(dim=(0, 1, 2, 3)) + 1e-6

    background_loss = torch.sum(background_loss, dim=(0, 1, 2, 3)) / background_num
    target_loss = torch.sum(target_loss, dim=(0, 1, 2, 3)) / target_num

    # Avoid shadowing this function's name with a local variable.
    return background_loss + target_loss



# Takes the GT and the prediction; returns the loss averaged over all valid
# pixels of the batch (a single scalar).
def heatmap_loss(heatmap_gt, heatmap_pred, valid_mask=None,
                 class_weight=None):
    """ Heatmap prediction loss.

    Args:
        heatmap_gt: (B, 1, H, W) integer class map (0 = background).
        heatmap_pred: (B, num_classes, H, W) logits.
        valid_mask: optional (B, 1, H, W) validity mask; None = all valid.
        class_weight: optional per-class weight tensor for the cross
            entropy (default: equal weights).

    Returns:
        Scalar loss averaged over all valid pixels of the batch.
    """
    # Per-pixel cross entropy (reduction="none" so the mask can be applied).
    if class_weight is None:
        loss_func = nn.CrossEntropyLoss(reduction="none")
    else:
        loss_func = nn.CrossEntropyLoss(class_weight, reduction="none")

    loss = loss_func(input=heatmap_pred,
                     target=torch.squeeze(heatmap_gt.to(torch.long), dim=1))

    # Fixed: a None mask previously crashed below; treat it as "all valid".
    if valid_mask is None:
        valid_mask = torch.ones_like(heatmap_gt, dtype=torch.float)

    # Masked sum over H and W...
    loss_spatial_sum = torch.sum(loss * torch.squeeze(
        valid_mask.to(torch.float), dim=1), dim=[1, 2])
    valid_spatial_sum = torch.sum(torch.squeeze(valid_mask.to(torch.float32),
                                                dim=1), dim=[1, 2])
    # ...then a single scalar over the batch dimension.
    loss = torch.sum(loss_spatial_sum) / torch.sum(valid_spatial_sum)

    return loss


class JunctionDetectionLoss(nn.Module):
    """ Junction detection loss (module wrapper around
        junction_detection_loss). """

    def __init__(self, grid_size, keep_border):
        super(JunctionDetectionLoss, self).__init__()
        self.grid_size = grid_size      # junction grid cell size (e.g. 8)
        self.keep_border = keep_border  # whether border cells stay valid

    def forward(self, prediction, target, valid_mask=None):
        """ Compute the loss; note the functional form takes the ground
            truth first. """
        return junction_detection_loss(target, prediction, valid_mask,
                                       grid_size=self.grid_size,
                                       keep_border=self.keep_border)


class RepJunctionDetectionLoss(nn.Module):
    """ Repeatability junction detection loss.

    Combines the standard junction detection loss on both views with a
    repeatability loss between view 1 and the warped view 2.
    """
    def __init__(self, grid_size, keep_border):
        super(RepJunctionDetectionLoss, self).__init__()
        self.grid_size = grid_size      # junction grid cell size (e.g. 8)
        self.keep_border = keep_border  # whether border cells stay valid
        self.reploss = RepeatabilityLoss(N=8, flag=0)

    def forward(self, pred1, target1, pred2, target2, homo,
                valid_mask1=None, valid_mask2=None):
        """ Args:
            pred1/pred2: (B, g**2+1, H/g, W/g) junction logits per view.
            target1/target2: (B, 1, H, W) ground-truth junction maps.
            homo: (B, 3, 3) homography warping view 2 into view 1.
            valid_mask1/valid_mask2: optional (B, 1, H, W) integer masks.

        Returns:
            (total, junc_loss, cosim_loss, peaky_loss).
        """
        batch_size, _, H, W = target1.shape

        # Fixed: the defaults must be created *before* the masks are used
        # in the concatenation below (None masks previously crashed).
        if valid_mask1 is None:
            valid_mask1 = torch.ones([batch_size, 1, H, W],
                                     dtype=torch.int, device=pred1.device)
        if valid_mask2 is None:
            valid_mask2 = torch.ones([batch_size, 1, H, W],
                                     dtype=torch.int, device=pred2.device)

        # Standard detection loss on both views at once.
        junc_loss = junction_detection_loss(
            torch.cat([target1, target2], dim=0),
            torch.cat([pred1, pred2], dim=0),
            torch.cat([valid_mask1, valid_mask2], dim=0),
            self.grid_size,
            self.keep_border,
        )

        # Full-resolution junction probabilities (drop the dust-bin channel).
        junc_prob1 = pixel_shuffle(softmax(pred1, dim=1)[:, :-1, :, :], self.grid_size)
        junc_warped2 = pixel_shuffle(softmax(pred2, dim=1)[:, :-1, :, :], self.grid_size)

        # Warp view 2 (and its mask) into the frame of view 1.
        junc_prob2 = warp_perspective(junc_warped2 * valid_mask2,
                                      homo, (H, W), flags="nearest")
        valid2 = warp_perspective(valid_mask2.to(torch.float32), homo, (H, W),
                                  flags="nearest")
        valid2 = valid2.to(torch.int)

        # Pixels valid in both views; the bitwise AND needs integer masks.
        valid = valid_mask1.to(torch.int) & valid2

        # Additionally invalidate pixels where both probability maps are 0.
        indices = ((junc_prob1 * valid) == 0) & ((junc_prob2 * valid) == 0)
        valid[indices] = 0

        rep_loss, cosim_loss, peaky_loss = self.reploss(
            junc_prob1 * valid, junc_prob2 * valid, valid)

        return junc_loss + rep_loss, junc_loss, cosim_loss, peaky_loss


class HeatmapLoss(nn.Module):
    """ Heatmap prediction loss (module wrapper around heatmap_loss). """

    def __init__(self, class_weight):
        super(HeatmapLoss, self).__init__()
        # Per-class cross-entropy weight, e.g. [w_background, w_foreground].
        self.class_weight = class_weight

    def forward(self, prediction, target, valid_mask=None):
        """ Compute the loss; note the functional form takes the ground
            truth first. """
        return heatmap_loss(target, prediction, valid_mask,
                            class_weight=self.class_weight)


class RepHeatmapLoss(nn.Module):
    """ Repeatability heatmap prediction loss.

    Combines the standard heatmap loss on both views with a repeatability
    loss between view 1 and the warped view 2.
    """

    def __init__(self, class_weight):
        super(RepHeatmapLoss, self).__init__()
        self.class_weight = class_weight  # per-class CE weight, e.g. [1., 1.]
        self.reploss = RepeatabilityLoss(N=16, flag=0)

    def forward(self, pred1, target1, pred2, target2, homo,
                valid_mask1=None, valid_mask2=None):
        """ Args:
            pred1/pred2: heatmap logits of both views.
            target1/target2: (B, 1, H, W) ground-truth heatmaps.
            homo: (B, 3, 3) homography warping view 2 into view 1.
            valid_mask1/valid_mask2: optional (B, 1, H, W) integer masks.

        Returns:
            (total, heat_loss, cosim_loss, peaky_loss).
        """
        batch_size, _, H, W = target1.shape

        # Fixed: the defaults must be created *before* the masks are used
        # in the concatenation below (None masks previously crashed).
        if valid_mask1 is None:
            valid_mask1 = torch.ones([batch_size, 1, H, W],
                                     dtype=torch.int, device=pred1.device)
        if valid_mask2 is None:
            valid_mask2 = torch.ones([batch_size, 1, H, W],
                                     dtype=torch.int, device=pred2.device)

        heat_loss = heatmap_loss(
            torch.cat([target1, target2], dim=0),
            torch.cat([pred1, pred2], dim=0),
            torch.cat([valid_mask1, valid_mask2], dim=0),
            self.class_weight
        )

        # Foreground probability: 2-channel logits use a softmax, a single
        # channel is treated as a logit map.
        if pred1.shape[1] == 2:
            heatmap_prob1 = softmax(pred1, dim=1)[:, 1:, :, :]
            heatmap_warped2 = softmax(pred2, dim=1)[:, 1:, :, :]
        else:
            heatmap_prob1 = torch.sigmoid(pred1)
            heatmap_warped2 = torch.sigmoid(pred2)

        # Warp view 2 (and its mask) into the frame of view 1.
        heatmap_prob2 = warp_perspective(heatmap_warped2 * valid_mask2,
                                         homo, (H, W), flags="nearest")
        valid2 = warp_perspective(valid_mask2.to(torch.float32), homo, (H, W),
                                  flags="nearest")
        valid2 = valid2.to(torch.int)

        # Pixels valid in both views; the bitwise AND needs integer masks.
        valid = valid_mask1.to(torch.int) & valid2

        # Additionally invalidate pixels where both probability maps are 0.
        indices = ((heatmap_prob1 * valid) == 0) & ((heatmap_prob2 * valid) == 0)
        valid[indices] = 0

        rep_loss, cosim_loss, peaky_loss = self.reploss(
            heatmap_prob1 * valid, heatmap_prob2 * valid, valid)

        return heat_loss + rep_loss, heat_loss, cosim_loss, peaky_loss


class AngleLoss(nn.Module):
    """ Multi-scale angle regression loss. """

    def __init__(self):
        super(AngleLoss, self).__init__()

    def forward(self, angle0, angle1, angle2, angle_gt):
        """ Sum the angle loss over three scales (full, 1/2 and 1/4
            resolution); the ground truth is downsampled bilinearly. """
        # Build the ground-truth pyramid.
        gt_half = F.interpolate(angle_gt, scale_factor=0.5,
                                mode='bilinear', align_corners=False)
        gt_quarter = F.interpolate(gt_half, scale_factor=0.5,
                                   mode='bilinear', align_corners=False)

        loss0 = angle_loss(angle_gt, angle0)
        loss1 = angle_loss(gt_half, angle1)
        loss2 = angle_loss(gt_quarter, angle2)

        return loss0 + loss1 + loss2, loss0, loss1, loss2


class RegularizationLoss(nn.Module):
    """ Module for regularization loss.

    Sums every nn.Parameter entry of the loss-weight dict (the learnable
    log-weights of the dynamic weighting policy); static tensor weights
    contribute nothing, so the result is zero for purely static weighting.
    """

    def __init__(self):
        super(RegularizationLoss, self).__init__()
        self.name = "regularization_loss"
        # Kept for backward compatibility; no longer mutated by forward().
        self.loss_init = torch.zeros([])

    def forward(self, loss_weights):
        """ Return the sum of all nn.Parameter values in loss_weights. """
        # Fixed: start from a *fresh* zero on the weights' device. The
        # previous code did `loss = self.loss_init.to(device)` followed by
        # `loss += val`; when `.to()` is a no-op (same device, e.g. CPU) it
        # returns the stored tensor itself, so the in-place add silently
        # accumulated the sum across calls.
        loss = torch.zeros([], device=loss_weights["w_heatmap"].device)
        for val in loss_weights.values():
            if isinstance(val, nn.Parameter):
                # Out-of-place add: keeps autograd happy and avoids mutation.
                loss = loss + val

        return loss


def triplet_loss(desc_pred1, desc_pred2, points1, points2, line_indices,
                 epoch, grid_size=8, dist_threshold=8,
                 init_dist_threshold=64, margin=1):
    """ Regular triplet loss for descriptor learning.

    Args:
        desc_pred1, desc_pred2: (B, D, Hc, Wc) dense descriptor maps.
        points1, points2: corresponding keypoints sampled along the lines.
        line_indices: (B, n_points) line index per point; 0 marks invalid
            padding points.
        epoch: current epoch, used to anneal the negative-mining radius.
        grid_size: descriptor map downscaling factor w.r.t. the image.
        dist_threshold: final minimal pixel distance for negative mining.
        init_dist_threshold: initial (larger) minimal distance.
        margin: triplet margin.

    Returns:
        (per-point triplet losses, grid1, grid2, valid_points).
        NOTE(review): when no valid point exists a bare 0-d tensor is
        returned instead of the 4-tuple; callers indexing [0] would fail on
        that path — confirm it cannot be hit in practice.
    """
    b_size, _, Hc, Wc = desc_pred1.size()
    img_size = (Hc * grid_size, Wc * grid_size)
    device = desc_pred1.device

    # Extract valid keypoints
    n_points = line_indices.size()[1]
    valid_points = line_indices.bool().flatten()
    n_correct_points = torch.sum(valid_points).item()
    if n_correct_points == 0:
        return torch.tensor(0., dtype=torch.float, device=device)

    # Check which keypoints are too close to be matched
    # dist_threshold is decreased at each epoch for easier training
    dist_threshold = max(dist_threshold,
                         2 * init_dist_threshold // (epoch + 1))
    dist_mask = get_dist_mask(points1, points2, valid_points, dist_threshold)

    # Additionally ban negative mining along the same line
    common_line_mask = get_common_line_mask(line_indices, valid_points)
    dist_mask = dist_mask | common_line_mask

    # Convert the keypoints to a grid suitable for interpolation
    grid1 = keypoints_to_grid(points1, img_size)
    grid2 = keypoints_to_grid(points2, img_size)

    # Sample descriptors at the keypoints (bilinear grid_sample; note that
    # align_corners is left at its default — confirm this matches training).
    desc1 = F.grid_sample(desc_pred1, grid1).permute(
        0, 2, 3, 1).reshape(b_size * n_points, -1)[valid_points]
    # (n_valid, D) L2-normalized descriptors
    desc1 = F.normalize(desc1, dim=1)
    desc2 = F.grid_sample(desc_pred2, grid2).permute(
        0, 2, 3, 1).reshape(b_size * n_points, -1)[valid_points]
    desc2 = F.normalize(desc2, dim=1)
    # Squared L2 distance between unit vectors: ||a - b||^2 = 2 - 2 a.b
    desc_dists = 2 - 2 * (desc1 @ desc2.t())

    # Positive distance loss: matched pairs lie on the diagonal.
    pos_dist = torch.diag(desc_dists)

    # Negative distance loss: ban the diagonal and all masked pairs by
    # setting them to the maximal possible distance (4) before mining the
    # hardest negative per row and per column.
    max_dist = torch.tensor(4., dtype=torch.float, device=device)
    desc_dists[
        torch.arange(n_correct_points, dtype=torch.long),
        torch.arange(n_correct_points, dtype=torch.long)] = max_dist
    desc_dists[dist_mask] = max_dist
    neg_dist = torch.min(torch.min(desc_dists, dim=1)[0],
                         torch.min(desc_dists, dim=0)[0])

    triplet_loss = F.relu(margin + pos_dist - neg_dist)
    return triplet_loss, grid1, grid2, valid_points


class TripletDescriptorLoss(nn.Module):
    """ Triplet descriptor loss. """

    def __init__(self, grid_size, dist_threshold, margin):
        super(TripletDescriptorLoss, self).__init__()
        self.grid_size = grid_size            # descriptor downscaling (e.g. 4)
        self.init_dist_threshold = 64         # initial negative-mining radius
        self.dist_threshold = dist_threshold  # final radius (e.g. 8)
        self.margin = margin                  # triplet margin (e.g. 1)

    def forward(self, desc_pred1, desc_pred2, points1,
                points2, line_indices, epoch):
        return self.descriptor_loss(desc_pred1, desc_pred2, points1,
                                    points2, line_indices, epoch)

    # The descriptor loss based on regularly sampled points along the lines
    def descriptor_loss(self, desc_pred1, desc_pred2, points1,
                        points2, line_indices, epoch):
        """ Mean of the per-point triplet losses. """
        per_point = triplet_loss(
            desc_pred1, desc_pred2, points1, points2, line_indices, epoch,
            self.grid_size, self.dist_threshold, self.init_dist_threshold,
            self.margin)[0]
        return torch.mean(per_point)


class TotalLoss(nn.Module):
    """ Total loss summing junction, heatmap, angle, descriptor
        and regularization losses. """

    def __init__(self, loss_funcs, loss_weights, weighting_policy):
        super(TotalLoss, self).__init__()
        # Whether we need to compute the descriptor loss
        self.compute_descriptors = "descriptor_loss" in loss_funcs

        self.loss_funcs = loss_funcs
        self.loss_weights = loss_weights
        self.weighting_policy = weighting_policy

        # Always add the regularization loss (returns zero if unused).
        # Fixed: the previous hard-coded `.cuda()` crashed on CPU-only
        # machines and moved nothing anyway — RegularizationLoss has no
        # parameters or buffers, and its forward() already places the
        # result on the loss weights' device.
        self.loss_funcs["reg_loss"] = RegularizationLoss()

    def forward(self, junc_pred, junc_target, heatmap_pred,
                heatmap_target, angle0, angle1, angle2, angle_target,
                valid_mask=None):
        """ Detection-only loss (single view).

        Combines the junction, heatmap and multi-scale angle losses with
        either learned (dynamic, exp(-w)-scaled) or fixed (static) weights.
        Returns a dict with the total loss, every individual term and the
        effective scalar weights.
        """
        # Compute the junction loss
        junc_loss = self.loss_funcs["junc_loss"](junc_pred, junc_target,
                                                 valid_mask)

        # Compute the heatmap loss
        heatmap_loss = self.loss_funcs["heatmap_loss"](
            heatmap_pred, heatmap_target, valid_mask)

        # Compute the angle loss (total + the three per-scale terms)
        angle_loss, angle_loss0, angle_loss1, angle_loss2 = self.loss_funcs["angle_loss"](
            angle0, angle1, angle2, angle_target)

        # Compute the total loss.
        if self.weighting_policy == "dynamic":
            # Sum of the learnable log-weights, regularizing exp(-w) scaling.
            reg_loss = self.loss_funcs["reg_loss"](self.loss_weights)

            # Uncertainty-style weighting: each term is scaled by exp(-w).
            total_loss = junc_loss * torch.exp(-self.loss_weights["w_junc"]) + \
                         heatmap_loss * torch.exp(-self.loss_weights["w_heatmap"]) + \
                         angle_loss * torch.exp(-self.loss_weights["w_angle"]) + \
                         reg_loss

            # Repeatability sub-terms are not computed in this code path;
            # report zeros so the output schema matches forward_paired().
            junc_cross = 0.
            junc_cosim = 0.
            junc_peaky = 0.
            heat_cross = 0.
            heat_cosim = 0.
            heat_peaky = 0.

            return {
                "total_loss": total_loss,
                "junc_loss": junc_loss,
                "junc_cross": junc_cross,
                "junc_cosim": junc_cosim,
                "junc_peaky": junc_peaky,
                "heatmap_loss": heatmap_loss,
                "heat_cross": heat_cross,
                "heat_cosim": heat_cosim,
                "heat_peaky": heat_peaky,
                "angle_loss": angle_loss,
                "angle_loss0": angle_loss0,
                "angle_loss1": angle_loss1,
                "angle_loss2": angle_loss2,
                "reg_loss": reg_loss,
                # Effective weights are logged as plain floats.
                "w_junc": torch.exp(-self.loss_weights["w_junc"]).item(),
                "w_heatmap": torch.exp(-self.loss_weights["w_heatmap"]).item(),
                "w_angle": torch.exp(-self.loss_weights["w_angle"]).item(),
            }

        elif self.weighting_policy == "static":
            # Fixed weighted sum of the three detection terms.
            total_loss = junc_loss * self.loss_weights["w_junc"] + \
                         heatmap_loss * self.loss_weights["w_heatmap"] + \
                         angle_loss * self.loss_weights["w_angle"]

            # Zero with purely static weights (no nn.Parameter present).
            reg_loss = self.loss_funcs["reg_loss"](self.loss_weights)

            junc_cross = 0.
            junc_cosim = 0.
            junc_peaky = 0.
            heat_cross = 0.
            heat_cosim = 0.
            heat_peaky = 0.

            return {
                "total_loss": total_loss,
                "junc_loss": junc_loss,
                "junc_cross": junc_cross,
                "junc_cosim": junc_cosim,
                "junc_peaky": junc_peaky,
                "heatmap_loss": heatmap_loss,
                "heat_cross": heat_cross,
                "heat_cosim": heat_cosim,
                "heat_peaky": heat_peaky,
                "angle_loss": angle_loss,
                "angle_loss0": angle_loss0,
                "angle_loss1": angle_loss1,
                "angle_loss2": angle_loss2,
                "reg_loss": reg_loss,
                "w_junc": self.loss_weights["w_junc"].item(),
                "w_heatmap": self.loss_weights["w_heatmap"].item(),
                "w_angle": self.loss_weights["w_angle"].item(),
            }

        else:
            raise ValueError("[Error] Unknown weighting policy.")

    def forward_paired(self,
                            junc_map_pred1, junc_map_pred2, junc_map_target1,
                            junc_map_target2, heatmap_pred1, heatmap_pred2, heatmap_target1,
                            heatmap_target2, angle0_1, angle1_1, angle2_1,
                            angle0_2, angle1_2, angle2_2, angle_target1, angle_target2,
                            homo, valid_mask1=None, valid_mask2=None,
                            ):
        """ Loss for paired-view detection (repeatability variants of the
        junction and heatmap losses + the angle loss; no descriptors).

        Returns a dict with the total loss, every individual term and the
        effective scalar weights.
        """
        # Compute junction loss (repeatability variant over both views)
        junc_loss, junc_cross, junc_cosim, junc_peaky = self.loss_funcs["junc_loss"](
            junc_map_pred1, junc_map_target1, junc_map_pred2, junc_map_target2,
            homo, valid_mask1, valid_mask2
        )
        # Get junction loss weight: dynamic weights are learnable
        # log-weights, so the effective weight is exp(-w).
        if isinstance(self.loss_weights["w_junc"], nn.Parameter):
            w_junc = torch.exp(-self.loss_weights["w_junc"])
        else:
            w_junc = self.loss_weights["w_junc"]

        # Compute heatmap loss (repeatability variant over both views)
        heatmap_loss, heat_cross, heat_cosim, heat_peaky = self.loss_funcs["heatmap_loss"](
            heatmap_pred1, heatmap_target1, heatmap_pred2, heatmap_target2,
            homo, valid_mask1, valid_mask2
        )
        # Get heatmap loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_heatmap"], nn.Parameter):
            w_heatmap = torch.exp(-self.loss_weights["w_heatmap"])
        else:
            w_heatmap = self.loss_weights["w_heatmap"]

        # Compute angle loss on both views stacked along the batch axis
        angle_loss, angle_loss0, angle_loss1, angle_loss2 = self.loss_funcs["angle_loss"](
            torch.cat([angle0_1, angle0_2], dim=0),
            torch.cat([angle1_1, angle1_2], dim=0),
            torch.cat([angle2_1, angle2_2], dim=0),
            torch.cat([angle_target1, angle_target2], dim=0),
        )
        # Get angle loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_angle"], nn.Parameter):
            w_angle = torch.exp(-self.loss_weights["w_angle"])
        else:
            w_angle = self.loss_weights["w_angle"]

        # Update the total loss
        total_loss = (junc_loss * w_junc
                      + heatmap_loss * w_heatmap
                      + angle_loss * w_angle)

        outputs = {
            "junc_loss": junc_loss,
            "junc_cross": junc_cross,
            "junc_cosim": junc_cosim,
            "junc_peaky": junc_peaky,
            "heatmap_loss": heatmap_loss,
            "heat_cross": heat_cross,
            "heat_cosim": heat_cosim,
            "heat_peaky": heat_peaky,
            "angle_loss": angle_loss,
            "angle_loss0": angle_loss0,
            "angle_loss1": angle_loss1,
            "angle_loss2": angle_loss2,
            # Log the weights as plain floats. Fixed: the old checks tested
            # `isinstance(w, nn.Parameter)`, which was always False here —
            # exp(-param) returns a Tensor and static weights are Tensors —
            # so raw tensors leaked into the log dict instead of scalars.
            "w_junc": w_junc.item()
                if isinstance(w_junc, torch.Tensor) else w_junc,
            "w_heatmap": w_heatmap.item()
                if isinstance(w_heatmap, torch.Tensor) else w_heatmap,
            "w_angle": w_angle.item()
                if isinstance(w_angle, torch.Tensor) else w_angle,
        }

        # Compute the regularization loss (sum of the learnable weights)
        reg_loss = self.loss_funcs["reg_loss"](self.loss_weights)
        total_loss += reg_loss
        outputs.update({
            "reg_loss": reg_loss,
            "total_loss": total_loss
        })

        return outputs

    def forward_descriptors_rep(self,
                            junc_map_pred1, junc_map_pred2, junc_map_target1,
                            junc_map_target2, heatmap_pred1, heatmap_pred2, heatmap_target1,
                            heatmap_target2, line_points1, line_points2, line_indices,
                            desc_pred1, desc_pred2, angle0_1, angle1_1, angle2_1,
                            angle0_2, angle1_2, angle2_2, angle_target1, angle_target2,
                            epoch, homo, valid_mask1=None, valid_mask2=None,
                            ):
        """ Loss for detection + description.

        Aggregates the junction, heatmap, angle and descriptor losses over
        a pair of views related by the homography `homo`, then adds the
        weight-regularization term.  Each entry of self.loss_weights is
        either a static float or a learned nn.Parameter w, in which case
        the effective weight is exp(-w) (dynamic weighting).

        Returns:
            dict: individual loss terms, the scalar effective weights
            ("w_junc", "w_heatmap", "w_angle", "w_desc"), "reg_loss" and
            the weighted "total_loss".
        """
        # Compute junction loss over both views.
        junc_loss, junc_cross, junc_cosim, junc_peaky = self.loss_funcs["junc_loss"](
            junc_map_pred1, junc_map_target1, junc_map_pred2, junc_map_target2,
            homo, valid_mask1, valid_mask2
        )
        # Get junction loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_junc"], nn.Parameter):
            w_junc = torch.exp(-self.loss_weights["w_junc"])
        else:
            w_junc = self.loss_weights["w_junc"]

        # Compute heatmap loss over both views.
        heatmap_loss, heat_cross, heat_cosim, heat_peaky = self.loss_funcs["heatmap_loss"](
            heatmap_pred1, heatmap_target1, heatmap_pred2, heatmap_target2,
            homo, valid_mask1, valid_mask2
        )
        # Get heatmap loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_heatmap"], nn.Parameter):
            w_heatmap = torch.exp(-self.loss_weights["w_heatmap"])
        else:
            w_heatmap = self.loss_weights["w_heatmap"]

        # Compute angle loss; both views are stacked along the batch axis.
        angle_loss, angle_loss0, angle_loss1, angle_loss2 = self.loss_funcs["angle_loss"](
            torch.cat([angle0_1, angle0_2], dim=0),
            torch.cat([angle1_1, angle1_2], dim=0),
            torch.cat([angle2_1, angle2_2], dim=0),
            torch.cat([angle_target1, angle_target2], dim=0),
        )
        # Get angle loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_angle"], nn.Parameter):
            w_angle = torch.exp(-self.loss_weights["w_angle"])
        else:
            w_angle = self.loss_weights["w_angle"]

        # Compute the descriptor loss
        descriptor_loss = self.loss_funcs["descriptor_loss"](
            desc_pred1, desc_pred2, line_points1,
            line_points2, line_indices, epoch)
        # Get descriptor loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_desc"], nn.Parameter):
            w_descriptor = torch.exp(-self.loss_weights["w_desc"])
        else:
            w_descriptor = self.loss_weights["w_desc"]

        # Update the total loss
        total_loss = (junc_loss * w_junc
                      + heatmap_loss * w_heatmap
                      + angle_loss * w_angle
                      + descriptor_loss * w_descriptor)

        # BUG FIX: torch.exp(-param) returns a plain Tensor, never an
        # nn.Parameter, so the old `isinstance(w, nn.Parameter)` checks
        # below never fired and dynamic weights leaked into the outputs
        # as live tensors instead of plain floats.  Check Tensor instead.
        outputs = {
            "junc_loss": junc_loss,
            "junc_cross": junc_cross,
            "junc_cosim": junc_cosim,
            "junc_peaky": junc_peaky,
            "heatmap_loss": heatmap_loss,
            "heat_cross": heat_cross,
            "heat_cosim": heat_cosim,
            "heat_peaky": heat_peaky,
            "angle_loss": angle_loss,
            "angle_loss0": angle_loss0,
            "angle_loss1": angle_loss1,
            "angle_loss2": angle_loss2,
            "w_junc": w_junc.item() \
                if isinstance(w_junc, torch.Tensor) else w_junc,
            "w_heatmap": w_heatmap.item() \
                if isinstance(w_heatmap, torch.Tensor) else w_heatmap,
            "w_angle": w_angle.item() \
                if isinstance(w_angle, torch.Tensor) else w_angle,
            "descriptor_loss": descriptor_loss,
            "w_desc": w_descriptor.item() \
                if isinstance(w_descriptor, torch.Tensor) else w_descriptor
        }

        # Compute the regularization loss on the learned weights.
        reg_loss = self.loss_funcs["reg_loss"](self.loss_weights)
        total_loss += reg_loss
        outputs.update({
            "reg_loss": reg_loss,
            "total_loss": total_loss
        })

        return outputs

    def forward_descriptors(self,
                            junc_map_pred1, junc_map_pred2, junc_map_target1,
                            junc_map_target2, heatmap_pred1, heatmap_pred2, heatmap_target1,
                            heatmap_target2, line_points1, line_points2, line_indices,
                            desc_pred1, desc_pred2, angle0_1, angle1_1, angle2_1,
                            angle0_2, angle1_2, angle2_2, angle_target1, angle_target2,
                            epoch, valid_mask1=None, valid_mask2=None,
                            ):
        """ Loss for detection + description.

        Stacks the two views along the batch axis and aggregates the
        junction, heatmap, angle and descriptor losses, then adds the
        weight-regularization term.  Each entry of self.loss_weights is
        either a static float or a learned nn.Parameter w, in which case
        the effective weight is exp(-w) (dynamic weighting).

        NOTE(review): despite the None defaults, valid_mask1/valid_mask2
        are fed straight into torch.cat, which fails on None — the masks
        look effectively required; confirm against callers.

        Returns:
            dict: individual loss terms (per-component junc/heat stats
            are reported as 0.), the scalar effective weights, "reg_loss"
            and the weighted "total_loss".
        """
        # Compute junction loss on the batch-stacked views.
        junc_loss = self.loss_funcs["junc_loss"](
            torch.cat([junc_map_pred1, junc_map_pred2], dim=0),
            torch.cat([junc_map_target1, junc_map_target2], dim=0),
            torch.cat([valid_mask1, valid_mask2], dim=0)
        )
        # Get junction loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_junc"], nn.Parameter):
            w_junc = torch.exp(-self.loss_weights["w_junc"])
        else:
            w_junc = self.loss_weights["w_junc"]

        # Compute heatmap loss on the batch-stacked views.
        heatmap_loss = self.loss_funcs["heatmap_loss"](
            torch.cat([heatmap_pred1, heatmap_pred2], dim=0),
            torch.cat([heatmap_target1, heatmap_target2], dim=0),
            torch.cat([valid_mask1, valid_mask2], dim=0),
        )
        # Get heatmap loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_heatmap"], nn.Parameter):
            w_heatmap = torch.exp(-self.loss_weights["w_heatmap"])
        else:
            w_heatmap = self.loss_weights["w_heatmap"]

        # Compute angle loss on the batch-stacked views.
        angle_loss, angle_loss0, angle_loss1, angle_loss2 = self.loss_funcs["angle_loss"](
            torch.cat([angle0_1, angle0_2], dim=0),
            torch.cat([angle1_1, angle1_2], dim=0),
            torch.cat([angle2_1, angle2_2], dim=0),
            torch.cat([angle_target1, angle_target2], dim=0),
        )
        # Get angle loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_angle"], nn.Parameter):
            w_angle = torch.exp(-self.loss_weights["w_angle"])
        else:
            w_angle = self.loss_weights["w_angle"]

        # Compute the descriptor loss
        descriptor_loss = self.loss_funcs["descriptor_loss"](
            desc_pred1, desc_pred2, line_points1,
            line_points2, line_indices, epoch)
        # Get descriptor loss weight (dynamic or not)
        if isinstance(self.loss_weights["w_desc"], nn.Parameter):
            w_descriptor = torch.exp(-self.loss_weights["w_desc"])
        else:
            w_descriptor = self.loss_weights["w_desc"]

        # Update the total loss
        total_loss = (junc_loss * w_junc
                      + heatmap_loss * w_heatmap
                      + angle_loss * w_angle
                      + descriptor_loss * w_descriptor)

        # This variant's junction/heatmap losses return a single scalar,
        # so the per-component stats are reported as zeros to keep the
        # output schema aligned with forward_descriptors_rep.
        junc_cross = 0.
        junc_cosim = 0.
        junc_peaky = 0.
        heat_cross = 0.
        heat_cosim = 0.
        heat_peaky = 0.

        # BUG FIX: torch.exp(-param) returns a plain Tensor, never an
        # nn.Parameter, so the old `isinstance(w, nn.Parameter)` checks
        # below never fired and dynamic weights leaked into the outputs
        # as live tensors instead of plain floats.  Check Tensor instead.
        outputs = {
            "junc_loss": junc_loss,
            "junc_cross": junc_cross,
            "junc_cosim": junc_cosim,
            "junc_peaky": junc_peaky,
            "heatmap_loss": heatmap_loss,
            "heat_cross": heat_cross,
            "heat_cosim": heat_cosim,
            "heat_peaky": heat_peaky,
            "angle_loss": angle_loss,
            "angle_loss0": angle_loss0,
            "angle_loss1": angle_loss1,
            "angle_loss2": angle_loss2,
            "w_junc": w_junc.item() \
                if isinstance(w_junc, torch.Tensor) else w_junc,
            "w_heatmap": w_heatmap.item() \
                if isinstance(w_heatmap, torch.Tensor) else w_heatmap,
            "w_angle": w_angle.item() \
                if isinstance(w_angle, torch.Tensor) else w_angle,
            "descriptor_loss": descriptor_loss,
            "w_desc": w_descriptor.item() \
                if isinstance(w_descriptor, torch.Tensor) else w_descriptor
        }

        # Compute the regularization loss on the learned weights.
        reg_loss = self.loss_funcs["reg_loss"](self.loss_weights)
        total_loss += reg_loss
        outputs.update({
            "reg_loss": reg_loss,
            "total_loss": total_loss
        })

        return outputs

