# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn

from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou


class HungarianMatcher(nn.Module):
    """This class computes an assignment between the targets and the predictions of the network

    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
    while the others are un-matched (and thus treated as non-objects).
    """

    def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
        """Creates the matcher

        Params:
            cost_class: This is the relative weight of the classification error in the matching cost
            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost

        Raises:
            ValueError: if all three cost weights are zero (the cost matrix would be
                identically zero and the matching meaningless).
        """
        super().__init__()
        self.cost_class = cost_class
        self.cost_bbox = cost_bbox
        self.cost_giou = cost_giou
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would silently allow a degenerate all-zero cost matrix.
        if cost_class == 0 and cost_bbox == 0 and cost_giou == 0:
            raise ValueError("all costs can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        """ Performs the matching

        Params:
            outputs: This is a dict that contains at least these entries:
                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates

            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
                           objects in the target) containing the class labels
                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates

        Returns:
            A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        bs, num_queries = outputs["pred_logits"].shape[:2]

        # Flatten the batch and query dimensions so every per-pair cost can be
        # computed with a single batched operation.
        out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1)  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]

        # Concatenate the targets of every image in the batch.
        # total_targets = sum of num_target_boxes over all images.
        tgt_ids = torch.cat([v["labels"] for v in targets])   # [total_targets]
        tgt_bbox = torch.cat([v["boxes"] for v in targets])   # [total_targets, 4]

        # Classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it with 1 - proba[target class]. The constant 1 doesn't
        # change the matching, so it is omitted.
        # Shape: [batch_size * num_queries, total_targets].
        cost_class = -out_prob[:, tgt_ids]

        # L1 cost between predicted and target boxes (cxcywh coordinates).
        # Shape: [batch_size * num_queries, total_targets].
        cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)

        # Negated generalized IoU cost; boxes are converted to xyxy first.
        cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))

        # Weighted sum of the three costs, then reshape so the target dimension
        # can be split back per image. scipy's solver runs on CPU numpy arrays.
        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
        C = C.view(bs, num_queries, -1).cpu()  # [bs, num_queries, total_targets]

        # Per-image target counts, used to split the concatenated target axis.
        sizes = [len(v["boxes"]) for v in targets]
        # C.split(sizes, -1) yields one chunk per image of shape
        # [bs, num_queries, num_targets_i]; c[i] selects image i's own
        # [num_queries, num_targets_i] cost matrix, on which we solve the LSAP.
        # linear_sum_assignment handles rectangular matrices: each returned
        # (row_ind, col_ind) pair has length min(num_queries, num_targets_i).
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
        # i is row_ind (query indices), j is col_ind (target indices).
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]

def build_matcher(args):
    """Build a HungarianMatcher from parsed command-line arguments.

    Params:
        args: namespace exposing set_cost_class, set_cost_bbox and set_cost_giou,
              the relative weights of the three matching-cost terms.
    """
    return HungarianMatcher(
        cost_class=args.set_cost_class,
        cost_bbox=args.set_cost_bbox,
        cost_giou=args.set_cost_giou,
    )


# linear_sum_assignment solves rectangular (incomplete) assignment problems:
# it only matches min(n_rows, n_cols) pairs, leaving the rest unassigned.
"""
x = numpy.random.random([5,3])
array([[0.43182144, 0.50017166, 0.49610766],
       [0.42396374, 0.24765477, 0.73119888],
       [0.91074587, 0.65527871, 0.15078234],
       [0.15227237, 0.04692568, 0.13441276],
       [0.23123541, 0.91359641, 0.55209858]])
linear_sum_assignment(x)
(array([2, 3, 4], dtype=int64), array([2, 1, 0], dtype=int64))
y = numpy.random.random([3,5])
array([[0.70186983, 0.86020628, 0.03098373, 0.82915885, 0.61729529],
       [0.35147376, 0.88226606, 0.309722  , 0.15875757, 0.05663424],
       [0.34423317, 0.25305932, 0.29509905, 0.28896874, 0.08956875]])
linear_sum_assignment(y)
(array([0, 1, 2], dtype=int64), array([2, 3, 4], dtype=int64))
"""
