# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR model and criterion classes.
"""
import torch
import torch.nn.functional as F
from torch import nn

from util import box_ops
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
                       accuracy, get_world_size, interpolate,
                       is_dist_avail_and_initialized)

from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
                           dice_loss, sigmoid_focal_loss)
from .transformer import build_transformer


class DETR(nn.Module):
    """ This is the DETR module that performs object detection """
    def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
        """ Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_classes: number of object classes
            num_queries: number of object queries, ie detection slot. This is the maximal number of objects
                         DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
        """
        super().__init__()
        self.num_queries = num_queries  # e.g. args.num_queries = 100
        self.transformer = transformer
        hidden_dim = transformer.d_model  # transformer embedding width (args.hidden_dim, e.g. 256)
        self.class_embed = nn.Linear(hidden_dim, num_classes + 1)  # +1 output for the special no-object (background) class
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)  # 3-layer MLP regressing the 4 box coordinates
        self.query_embed = nn.Embedding(num_queries, hidden_dim)  # learned object queries, one embedding per detection slot
        self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)  # 1x1 conv projecting backbone channels (e.g. 2048 for ResNet-50/101) down to hidden_dim
        self.backbone = backbone
        self.aux_loss = aux_loss

    def forward(self, samples: NestedTensor):
        """ The forward expects a NestedTensor, which consists of:
               - samples.tensor: batched images, of shape [batch_size x 3 x H x W] (3 = RGB channels)
               - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels

            It returns a dict with the following elements:
               - "pred_logits": the classification logits (including no-object) for all queries.
                                Shape= [batch_size x num_queries x (num_classes + 1)]
               - "pred_boxes": The normalized boxes coordinates for all queries, represented as
                               (center_x, center_y, height, width). These values are normalized in [0, 1],
                               relative to the size of each individual image (disregarding possible padding).
                               See PostProcess for information on how to retrieve the unnormalized bounding box.
               - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
                                dictionnaries containing the two above keys for each decoder layer.
        """
        if isinstance(samples, (list, torch.Tensor)):  # wrap raw tensors/lists into a NestedTensor (tensor + padding mask)
            samples = nested_tensor_from_tensor_list(samples)  # defined in util/misc.py; returns NestedTensor(tensor, mask)
        features, pos = self.backbone(samples)  # backbone feature maps (possibly several levels) plus matching positional encodings

        src, mask = features[-1].decompose()  # use the last (deepest) feature level; mask marks padded pixels
        assert mask is not None
        hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]  # decoder output; presumably [num_decoder_layers, batch, num_queries, hidden_dim] — e.g. [6, 2, 100, 256] in a debug run
        outputs_class = self.class_embed(hs)  # per-layer class logits: last dim becomes num_classes + 1
        outputs_coord = self.bbox_embed(hs).sigmoid()  # per-layer box predictions squashed to normalized (cx, cy, w, h) in [0, 1]
        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}  # main output uses only the last decoder layer
        if self.aux_loss:  # optionally expose each intermediate decoder layer's predictions for auxiliary losses
            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}  # one dict per intermediate decoder layer (the last layer is the main output)
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]

# NOTE(review): reading/debug progress marker — next up: SetCriterion loss; matcher (Hungarian matching) mostly reviewed; SetCriterion reviewed.
class SetCriterion(nn.Module):
    """ This class computes the loss for DETR.
    The process happens in two steps:
        1) we compute hungarian assignment between ground truth boxes and the outputs of the model
        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
    """
    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[-1] = self.eos_coef  # down-weight the no-object (last) class in the classification loss
        self.register_buffer('empty_weight', empty_weight)  # registered buffers are saved/loaded with the module but never updated by the optimizer

    def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']  # predicted class logits for every query

        idx = self._get_src_permutation_idx(indices)  # (batch_idx, query_idx) pairs of the matched predictions
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])  # GT class labels gathered in matching order, concatenated across the batch
        target_classes = torch.full(src_logits.shape[:2], self.num_classes,
                                    dtype=torch.int64, device=src_logits.device)  # start with every query assigned to the no-object class (index num_classes)
        target_classes[idx] = target_classes_o  # matched queries receive their GT class; unmatched ones stay no-object

        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)  # cross_entropy wants logits shaped [batch_size, num_classes + 1, num_queries], hence the transpose
        losses = {'loss_ce': loss_ce}

        if log:  # additionally report the classification error rate (logging only)
            # TODO this should probably be a separate loss, not hacked in this one here
            losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
        return losses

    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
        This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
        """
        pred_logits = outputs['pred_logits']
        device = pred_logits.device
        tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)  # number of GT boxes per image
        # Count the number of predictions that are NOT "no-object" (which is the last class)
        card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
        card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())  # L1 distance between predicted and actual box counts
        losses = {'cardinality_error': card_err}
        return losses

    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        assert 'pred_boxes' in outputs
        idx = self._get_src_permutation_idx(indices)  # indices of the matched predictions
        src_boxes = outputs['pred_boxes'][idx]  # matched predicted boxes
        target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)  # matched GT boxes, concatenated across the batch

        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')  # per-coordinate L1 loss, reduced manually below

        losses = {}
        losses['loss_bbox'] = loss_bbox.sum() / num_boxes  # normalize by the total number of GT boxes

        loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(  # GIoU loss; diag keeps only matched pred/GT pairs
            box_ops.box_cxcywh_to_xyxy(src_boxes),
            box_ops.box_cxcywh_to_xyxy(target_boxes)))
        losses['loss_giou'] = loss_giou.sum() / num_boxes  # normalize GIoU loss the same way
        return losses

    def loss_masks(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the masks: the focal loss and the dice loss.
           targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
        """
        assert "pred_masks" in outputs

        src_idx = self._get_src_permutation_idx(indices)  # indices of matched predictions
        tgt_idx = self._get_tgt_permutation_idx(indices)  # indices of matched targets
        src_masks = outputs["pred_masks"]
        src_masks = src_masks[src_idx]
        masks = [t["masks"] for t in targets]
        # TODO use valid to mask invalid areas due to padding in loss
        target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
        target_masks = target_masks.to(src_masks)
        target_masks = target_masks[tgt_idx]

        # upsample predictions to the target size
        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],
                                mode="bilinear", align_corners=False)
        src_masks = src_masks[:, 0].flatten(1)

        target_masks = target_masks.flatten(1)  # flatten target masks and reshape to line up with the predictions
        target_masks = target_masks.view(src_masks.shape)
        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),  # focal loss on mask logits
            "loss_dice": dice_loss(src_masks, target_masks, num_boxes),  # dice loss on mask overlap
        }
        return losses

    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx

    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx

    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
        # dispatch table mapping a loss name to its implementation
        loss_map = {
            'labels': self.loss_labels,
            'cardinality': self.loss_cardinality,
            'boxes': self.loss_boxes,
            'masks': self.loss_masks
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)

    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}  # drop intermediate-layer outputs; the matching below uses only the last decoder layer

        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux, targets)  # Hungarian assignment between predictions and GT

        # Compute the average number of target boxes accross all nodes, for normalization purposes
        # (total GT box count used as the loss normalizer; averaged across processes in distributed mode)
        num_boxes = sum(len(t["labels"]) for t in targets)
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_boxes)  # blocking collective: sums the box counts from every process
        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()

        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))

        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if 'aux_outputs' in outputs:
            for i, aux_outputs in enumerate(outputs['aux_outputs']):
                indices = self.matcher(aux_outputs, targets)  # re-match per intermediate layer
                for loss in self.losses:
                    if loss == 'masks':
                        # Intermediate masks losses are too costly to compute, we ignore them.
                        continue
                    kwargs = {}
                    if loss == 'labels':
                        # Logging is enabled only for the last layer
                        kwargs = {'log': False}
                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}  # suffix with the layer index, e.g. loss_ce_0
                    losses.update(l_dict)

        return losses

# NOTE(review): check the evaluate function --> see where postprocessors comes from and what it does ---> purpose not yet fully understood; presumably used for evaluation.
class PostProcess(nn.Module):
    """ This module converts the model's output into the format expected by the coco api"""
    @torch.no_grad()
    def forward(self, outputs, target_sizes):
        """ Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
                          For evaluation, this must be the original image size (before any data augmentation)
                          For visualization, this should be the image size after data augment, but before padding
        """
        logits = outputs['pred_logits']
        raw_boxes = outputs['pred_boxes']

        # one (height, width) row per batch element
        assert len(logits) == len(target_sizes)
        assert target_sizes.shape[1] == 2

        # softmax over classes, then take the best real class per query
        # (the last channel is the no-object class and is excluded)
        probabilities = F.softmax(logits, -1)
        scores, labels = probabilities[..., :-1].max(-1)

        # convert normalized (cx, cy, w, h) boxes to absolute (x0, y0, x1, y1) pixel coordinates
        corner_boxes = box_ops.box_cxcywh_to_xyxy(raw_boxes)
        img_h, img_w = target_sizes.unbind(1)
        scale = torch.stack([img_w, img_h, img_w, img_h], dim=1).unsqueeze(1)  # [batch, 1, 4] broadcasts over queries
        abs_boxes = corner_boxes * scale

        # one result dict per image, as expected by the coco api
        results = []
        for s, l, b in zip(scores, labels, abs_boxes):
            results.append({'scores': s, 'labels': l, 'boxes': b})
        return results


class MLP(nn.Module):
    """ Very simple multi-layer perceptron (also called FFN).

    Stacks `num_layers` Linear layers: input_dim -> hidden_dim, then
    (num_layers - 2) hidden_dim -> hidden_dim layers, then hidden_dim -> output_dim,
    with ReLU between layers and no activation after the final layer.
    E.g. MLP(256, 256, 4, 3) builds Linear(256, 256), Linear(256, 256), Linear(256, 4).
    """
    # NOTE: the example shapes previously lived in a second dangling string literal
    # (a no-op expression statement, not a docstring); merged into the docstring above.

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)  # hidden widths for all but the final layer
        # pair consecutive widths: [input_dim, *h] feeds into [*h, output_dim]
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        # ReLU after every layer except the last, which returns raw (unactivated) outputs
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


def build(args):
    """Assemble the full detection system: the DETR model, its criterion, and postprocessors."""
    # the `num_classes` naming here is somewhat misleading.
    # it indeed corresponds to `max_obj_id + 1`, where max_obj_id
    # is the maximum id for a class in your dataset. For example,
    # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.
    # As another example, for a dataset that has a single class with id 1,
    # you should pass `num_classes` to be 2 (max_obj_id + 1).
    # For more details on this, check the following discussion
    # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223

    # Pick the value matching your dataset (upstream default: 91 for COCO, 250 for coco_panoptic):
    num_classes = 21  # VOC dataset: 20 classes + 1
    # num_classes = 11  # NWPU dataset: 10 classes + 1
    # num_classes = 2   # single-class aircraft dataset: 1 class + 1

    device = torch.device(args.device)

    backbone = build_backbone(args)
    transformer = build_transformer(args)

    model = DETR(
        backbone,
        transformer,
        num_classes=num_classes,
        num_queries=args.num_queries,  # e.g. 100
        aux_loss=args.aux_loss,
    )
    if args.masks:
        # wrap the detector with a segmentation head
        model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))

    matcher = build_matcher(args)  # Hungarian matcher

    # relative weight of each loss term in the total loss
    weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}
    weight_dict['loss_giou'] = args.giou_loss_coef
    if args.masks:
        weight_dict["loss_mask"] = args.mask_loss_coef
        weight_dict["loss_dice"] = args.dice_loss_coef
    # TODO this is a hack
    if args.aux_loss:
        # replicate every weight once per intermediate decoder layer,
        # producing keys such as loss_giou_0, loss_giou_1, ...
        aux_weight_dict = {}
        for layer in range(args.dec_layers - 1):
            for name, weight in weight_dict.items():
                aux_weight_dict[f'{name}_{layer}'] = weight
        weight_dict.update(aux_weight_dict)

    # losses to compute: classification, box regression, and cardinality logging
    losses = ['labels', 'boxes', 'cardinality']
    if args.masks:
        losses += ["masks"]

    criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict,
                             eos_coef=args.eos_coef, losses=losses)
    criterion.to(device)

    # 'bbox' postprocessor yields per-image {'scores', 'labels', 'boxes'} dicts
    postprocessors = {'bbox': PostProcess()}
    if args.masks:
        postprocessors['segm'] = PostProcessSegm()
        if args.dataset_file == "coco_panoptic":
            # ids 0..90 are "things", 91..200 are "stuff"
            is_thing_map = {i: i <= 90 for i in range(201)}
            postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85)

    return model, criterion, postprocessors
