# -*- coding: utf-8 -*-
# author： woldier wong
# datetime： 3/12/24 3:15 AM
# ide： PyCharm


from typing import Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from torch.nn import functional as F
# Import AnchorGenerator to keep compatibility.
from . anchor_utils import AnchorGenerator  # noqa: 401

from torchvision.models.detection.image_list import ImageList
# from torchvision.ops import boxes as box_ops, Conv2dNormActivation
from torchvision.ops import Conv2dNormActivation
# from torchvision.models.detection import _utils as det_utils
from . import det_utils,  boxes as box_ops


class RPNHead(nn.Module):
    """
    Adds a simple RPN Head with classification and regression heads.

    Args:
        in_channels (int): number of channels of the input feature maps
        num_anchors (int): number of anchors predicted per spatial location
        conv_depth (int, optional): number of 3x3 convolutions applied before the heads
    """

    _version = 2

    def __init__(self, in_channels: int, num_anchors: int, conv_depth=1) -> None:
        """
        Build the shared conv stack plus the two 1x1 prediction heads.

        :param in_channels: number of channels of the input feature maps
        :param num_anchors: number of anchors predicted per spatial location
        :param conv_depth: depth of the shared convolution stack
        """
        super().__init__()
        # Shared 3x3 conv stack (no normalization layer) applied before both heads.
        convs = []
        for _ in range(conv_depth):
            convs.append(Conv2dNormActivation(in_channels, in_channels, kernel_size=3, norm_layer=None))
        self.conv = nn.Sequential(*convs)
        # Objectness head: one logit per anchor per spatial location.
        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
        # Regression head: 4 box deltas per anchor per spatial location.
        self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)
        # Initialize every conv with a small-std normal and zero bias.
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                torch.nn.init.normal_(layer.weight, std=0.01)  # type: ignore[arg-type]
                if layer.bias is not None:
                    torch.nn.init.constant_(layer.bias, 0)  # type: ignore[arg-type]

    def _load_from_state_dict(
            self,
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
    ):
        # Version 2 wrapped the single conv in a Sequential of
        # Conv2dNormActivation blocks, so checkpoints saved before version 2
        # need their "conv.*" keys remapped to "conv.0.0.*".
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            # Renamed from `type` to avoid shadowing the builtin.
            for param_name in ["weight", "bias"]:
                old_key = f"{prefix}conv.{param_name}"
                new_key = f"{prefix}conv.0.0.{param_name}"
                if old_key in state_dict:
                    state_dict[new_key] = state_dict.pop(old_key)

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    def forward(self, x: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
        """
        Run the head over every feature level.

        :param x: list of feature maps (one per level), each of shape [B, C, H, W]
        :return: tuple of (objectness logits per level, box deltas per level)
        """
        logits = []
        bbox_reg = []
        for feature in x:
            t = self.conv(feature)
            logits.append(self.cls_logits(t))
            bbox_reg.append(self.bbox_pred(t))
        return logits, bbox_reg


def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int, W: int) -> Tensor:
    """
    Reorder a head output from [N, A*C, H, W] into [N, H*W*A, C].

    :param layer: prediction tensor of shape [N, A*C, H, W]
    :param N: batch size
    :param A: number of anchors per spatial location
    :param C: number of values predicted per anchor (classes, or 4 for boxes)
    :param H: feature map height
    :param W: feature map width
    :return: tensor of shape [N, H*W*A, C]
    """
    # Split the fused anchor/class channel dimension apart: [N, A, C, H, W].
    separated = layer.view(N, -1, C, H, W)
    # Bring the spatial dimensions forward: [N, H, W, A, C].
    spatial_first = separated.permute(0, 3, 4, 1, 2)
    # Collapse H, W and A into a single "anchor index" dimension.
    return spatial_first.flatten(1, 3)


def concat_box_prediction_layers(box_cls: List[Tensor], box_regression: List[Tensor]) -> Tuple[Tensor, Tensor]:
    """
    Merge the per-feature-level predictions of one batch into two flat tensors.

    Each level is permuted into the same anchor-major layout used for the
    labels, so that all levels can be concatenated and compared element-wise.

    :param box_cls: classification outputs per level, each [N, A*C, H, W]
    :param box_regression: regression outputs per level, each [N, A*4, H, W]
    :return: (objectness of shape [sum(N*H_i*W_i*A_i), C],
              box deltas of shape [sum(N*H_i*W_i*A_i), 4])
    """
    flat_cls = []
    flat_reg = []
    for cls_level, reg_level in zip(box_cls, box_regression):
        # N: batch size, AxC: anchors-per-location times classes,
        # H/W: spatial extent of this feature level.
        N, AxC, H, W = cls_level.shape
        # The regression head always predicts 4 deltas per anchor, so the
        # anchor count can be recovered from its channel dimension.
        A = reg_level.shape[1] // 4
        # Classes per anchor: 1 for binary objectness, more for multi-class
        # heads — derived rather than assumed so both layouts work.
        C = AxC // A
        # Reorder to [N, H, W, A, C] then collapse to [N, H*W*A, C] / [.., 4]
        # (permute_and_flatten inlined here to keep the block self-contained).
        flat_cls.append(cls_level.view(N, -1, C, H, W).permute(0, 3, 4, 1, 2).reshape(N, -1, C))
        flat_reg.append(reg_level.view(N, -1, 4, H, W).permute(0, 3, 4, 1, 2).reshape(N, -1, 4))
    # Concatenate the levels along the anchor dimension, then fold the batch
    # dimension in as well, matching how the labels are laid out.
    merged_cls = torch.cat(flat_cls, dim=1).flatten(0, -2)
    merged_reg = torch.cat(flat_reg, dim=1).reshape(-1, 4)
    return merged_cls, merged_reg


class RegionProposalNetwork(torch.nn.Module):
    """
    Implements Region Proposal Network (RPN).

    Args:
        anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        head (nn.Module): module that computes the objectness and regression deltas
            (binary classification + box regression)
        fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        pre_nms_top_n (Dict[str, int]): number of proposals to keep before applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        post_nms_top_n (Dict[str, int]): number of proposals to keep after applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        nms_thresh (float): NMS threshold used for postprocessing the RPN proposals
    """

    __annotations__ = {
        "box_coder": det_utils.BoxCoder,
        "proposal_matcher": det_utils.Matcher,
        "fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler,
    }

    def __init__(
            self,
            anchor_generator: AnchorGenerator,
            head: nn.Module,
            # Faster-RCNN Training
            fg_iou_thresh: float,
            bg_iou_thresh: float,
            batch_size_per_image: int,
            positive_fraction: float,
            # Faster-RCNN Inference
            pre_nms_top_n: Dict[str, int],
            post_nms_top_n: Dict[str, int],
            nms_thresh: float,
            score_thresh: float = 0.0,  # proposals with score below score_thresh are filtered out
    ) -> None:
        super().__init__()
        self.anchor_generator = anchor_generator  # anchor generator
        self.head = head  # classification / regression head
        self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))

        # used during training
        self.box_similarity = box_ops.box_iou

        self.proposal_matcher = det_utils.Matcher(
            fg_iou_thresh,  # minimum IoU for a positive match
            bg_iou_thresh,  # maximum IoU for a negative match
            allow_low_quality_matches=True,
        )
        # balanced positive/negative sampler used for the loss
        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction)
        # used during testing
        self._pre_nms_top_n = pre_nms_top_n
        self._post_nms_top_n = post_nms_top_n
        self.nms_thresh = nms_thresh
        self.score_thresh = score_thresh
        self.min_size = 1e-3  # boxes with width or height below 1e-3 are removed

    def pre_nms_top_n(self) -> int:
        """Number of proposals kept before NMS for the current train/eval mode."""
        if self.training:
            return self._pre_nms_top_n["training"]
        return self._pre_nms_top_n["testing"]

    def post_nms_top_n(self) -> int:
        """Number of proposals kept after NMS for the current train/eval mode."""
        if self.training:
            return self._post_nms_top_n["training"]
        return self._post_nms_top_n["testing"]

    def assign_targets_to_anchors(
            self, anchors: List[Tensor], targets: List[Dict[str, Tensor]]
    ) -> Tuple[List[Tensor], List[Tensor]]:
        """
        Assign GT boxes to anchors.

        :param anchors: per-image anchors (tensor_1, ..., tensor_B), B = batch size
        :param targets: per-image GT dicts; list length is the batch size
        :return: (labels per image, matched GT boxes per image)
        """
        labels = []
        matched_gt_boxes = []
        for anchors_per_image, targets_per_image in zip(anchors, targets):  # iterate over each image's anchors and targets

            gt_boxes = targets_per_image["boxes"]  # the GT boxes for this image

            if gt_boxes.numel() == 0:  # no GT boxes in this image
                # Background image (negative example)
                device = anchors_per_image.device
                # matched GT boxes (all zeros, nothing to match against)
                matched_gt_boxes_per_image = torch.zeros(anchors_per_image.shape, dtype=torch.float32, device=device)
                # label for each anchor (all background)
                labels_per_image = torch.zeros((anchors_per_image.shape[0],), dtype=torch.float32, device=device)
            else:
                # IoU matrix between GT boxes and anchors
                match_quality_matrix = self.box_similarity(gt_boxes, anchors_per_image)
                matched_idxs = self.proposal_matcher(match_quality_matrix)
                # get the targets corresponding GT for each proposal
                # NB: need to clamp the indices because we can have a single
                # GT in the image, and matched_idxs can be -2, which goes
                # out of bounds
                matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)]

                labels_per_image = matched_idxs >= 0  # matched with a GT box -> foreground, i.e. 1
                labels_per_image = labels_per_image.to(dtype=torch.float32)

                # Background (negative examples)
                bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD
                labels_per_image[bg_indices] = 0.0

                # discard indices that are between thresholds
                inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS
                labels_per_image[inds_to_discard] = -1.0

            labels.append(labels_per_image)
            matched_gt_boxes.append(matched_gt_boxes_per_image)
        return labels, matched_gt_boxes

    def _get_top_n_idx(self, objectness: Tensor, num_anchors_per_level: List[int]) -> Tensor:
        """
        Select the highest-scoring proposals, independently per feature level.

        :param objectness: objectness predictions, shape [B, A]
        :param num_anchors_per_level: total anchors of one image on each feature
            level; the list length is the number of feature levels
        :return: indices of the selected anchors, concatenated over levels
        """
        r = []
        offset = 0
        # Split objectness along dim=1 into len(num_anchors_per_level) chunks.
        # Chunk i has shape [B, A_i], where A_i is the number of anchors on
        # feature level i; then iterate over the chunks.
        for ob in objectness.split(num_anchors_per_level, 1):
            num_anchors = ob.shape[1]  # anchors on this feature level
            # Number of boxes to keep; not self.pre_nms_top_n() directly because
            # this level may contain fewer than pre_nms_top_n() anchors.
            pre_nms_top_n = det_utils._topk_min(ob, self.pre_nms_top_n(), 1)
            _, top_n_idx = ob.topk(pre_nms_top_n, dim=1)  # values (_) and indices (top_n_idx) of the top pre_nms_top_n scores
            r.append(top_n_idx + offset)
            offset += num_anchors  # advance the offset by this level's anchor count
        # cat the [ [B, A_1], [B, A_2], ..., [B, A_n] ] tensors along dim=1
        return torch.cat(r, dim=1)

    def filter_proposals(
            self,
            proposals: Tensor,
            objectness: Tensor,
            image_shapes: List[Tuple[int, int]],
            num_anchors_per_level: List[int],
    ) -> Tuple[List[Tensor], List[Tensor]]:
        """
        Filter the proposals.

        :param proposals: proposals obtained by applying the regression deltas
            to the anchors, shape [B, A, 4]
        :param objectness: objectness predictions, shape [B*A, 1]
        :param image_shapes: shape of each image before padding
        :param num_anchors_per_level: total anchors of one image on each feature
            level; the list length is the number of feature levels
        :return: (final boxes per image, final scores per image)
        """

        num_images = proposals.shape[0]  # batch size
        device = proposals.device  # current device
        # do not backprop through objectness
        objectness = objectness.detach()  # detached copy of objectness, no gradient, removed from the graph
        objectness = objectness.reshape(num_images, -1)  # reshape [B, A]
        # For each feature level, create a tensor (shape [n], n = total anchors
        # on that level) filled with the level index (0, 1, 2, ...).
        levels = [
            torch.full((n,), idx, dtype=torch.int64, device=device) for idx, n in enumerate(num_anchors_per_level)
        ]
        # Concatenate the per-level tensors.
        # Shape [A_1 + A_2 + ...]; the sum A is the total number of anchors
        # across all feature levels for one image.
        levels = torch.cat(levels, 0)
        # reshape to [1, A], then expand to the same shape as objectness [B, A]
        levels = levels.reshape(1, -1).expand_as(objectness)

        # select top_n boxes independently per level before applying nms
        # Resulting shape [B, n * F] for n top anchors per level and F levels,
        # or [B, min(n_1, n) + min(n_2, n) + ... ] when level i has n_i anchors.
        top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)
        # shape [B]
        image_range = torch.arange(num_images, device=device)
        batch_idx = image_range[:, None]  # shape [B] -> [B, 1]
        # gather the selected objectness scores
        objectness = objectness[batch_idx, top_n_idx]
        levels = levels[batch_idx, top_n_idx]  # feature-level index of each selected proposal
        proposals = proposals[batch_idx, top_n_idx]  # gather the selected proposals

        objectness_prob = torch.sigmoid(objectness)  # sigmoid activation

        final_boxes = []
        final_scores = []
        for boxes, scores, lvl, img_shape in zip(proposals, objectness_prob, levels,
                                                 image_shapes):  # process each image's proposals
            boxes = box_ops.clip_boxes_to_image(boxes, img_shape)  # clip boxes that exceed the image size to its bounds

            # remove small boxes
            keep = box_ops.remove_small_boxes(boxes, self.min_size)  # drop boxes with width or height below 1e-3
            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]

            # remove low scoring boxes
            # use >= for Backwards compatibility
            keep = torch.where(scores >= self.score_thresh)[0]  # drop boxes with score below score_thresh
            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]

            # non-maximum suppression, independently done per level
            keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh)  # run NMS per feature level

            # keep only topk scoring predictions
            keep = keep[: self.post_nms_top_n()]  # keep the n highest-scoring boxes
            boxes, scores = boxes[keep], scores[keep]

            final_boxes.append(boxes)
            final_scores.append(scores)
        return final_boxes, final_scores

    def compute_loss(
            self, objectness: Tensor, pred_bbox_deltas: Tensor, labels: List[Tensor], regression_targets: List[Tensor]
    ) -> Tuple[Tensor, Tensor]:
        """Compute the RPN loss.

        Args:
            objectness (Tensor) shape [B*A, 1]
            pred_bbox_deltas (Tensor)  shape [B*A, 4]
            labels (List[Tensor])  (tensor_1, ..., tensor_B) of length B, tensor_i has shape [A,]
            regression_targets (List[Tensor]) (tensor_1, ..., tensor_B) of length B, tensor_i has shape [A, 4]

        Returns:
            objectness_loss (Tensor) classification loss
            box_loss (Tensor)  regression loss
        """
        # sample positives and negatives via the balanced sampler
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]  # cat, then find mask==1 entries, i.e. positive indices
        sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]  # cat, then find mask==1 entries, i.e. negative indices

        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

        objectness = objectness.flatten()  # flatten [B*A, 1] -> [B*A]

        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)

        box_loss = F.smooth_l1_loss(  # regression loss (positives only)
            pred_bbox_deltas[sampled_pos_inds],
            regression_targets[sampled_pos_inds],
            beta=1 / 9,
            reduction="sum",
        ) / (sampled_inds.numel())
        # binary classification loss over all sampled anchors
        objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds])

        return objectness_loss, box_loss

    def forward(
            self,
            images: ImageList,
            features: Dict[str, Tensor],
            targets: Optional[List[Dict[str, Tensor]]] = None,
    ) -> Tuple[List[Tensor], Dict[str, Tensor]]:

        """
        Args:
            images (ImageList): images for which we want to compute the predictions
            features (Dict[str, Tensor]): features computed from the images that are
                used for computing the predictions. Each tensor in the list
                correspond to different feature levels
            targets (List[Dict[str, Tensor]]): ground-truth boxes present in the image (optional).
                If provided, each element in the dict should contain a field `boxes`,
                with the locations of the ground-truth boxes.

        Returns:
            boxes (List[Tensor]): the predicted boxes from the RPN, one Tensor per
                image.
            losses (Dict[str, Tensor]): the losses for the model during training. During
                testing, it is an empty dict.
        """
        # RPN uses all feature maps that are available
        features = list(features.values())  # keep only the values, discard the keys
        # objectness is a list whose length is the number of feature levels;
        # each entry has shape [B, C, H, W]
        objectness, pred_bbox_deltas = self.head(features)  # run the RPN head for classification and regression
        # anchors is a list of length batch_size; each element aggregates one
        # image's anchors over all feature levels, so
        # anchors[0].shape[0] == sum([o.shape[1]*o.shape[2]*o.shape[3] for o in objectness])
        anchors = self.anchor_generator(images, features)

        num_images = len(anchors)  # number of images
        # per-level prediction shapes for one image; list length = number of levels
        num_anchors_per_level_shape_tensors = [o[0].shape for o in objectness]
        # total anchors of one image on each feature level; list length = number of levels
        num_anchors_per_level = [s[0] * s[1] * s[2] for s in num_anchors_per_level_shape_tensors]
        # [B*A, 1], [B*A, 4]
        objectness, pred_bbox_deltas = concat_box_prediction_layers(objectness, pred_bbox_deltas)
        # apply pred_bbox_deltas to anchors to obtain the decoded proposals
        # note that we detach the deltas because Faster R-CNN do not backprop through
        # the proposals
        proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)  # combine predicted deltas with anchors to get proposals
        proposals = proposals.view(num_images, -1, 4)  # reshape [batch* Anchors, 1, 4] -> [batch, Anchors number, 4]
        # boxes: list of length batch_size holding each image's kept proposals
        # after NMS; selected purely by score, no comparison with targets here
        boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)

        losses = {}
        if self.training:
            if targets is None:
                raise ValueError("targets should not be None")
            # assign GT boxes to anchors
            labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)
            regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)
            loss_objectness, loss_rpn_box_reg = self.compute_loss(  # compute the losses
                objectness, pred_bbox_deltas, labels, regression_targets
            )
            losses = {
                "loss_objectness": loss_objectness,
                "loss_rpn_box_reg": loss_rpn_box_reg,
            }
        return boxes, losses


if __name__ == "__main__":
    # Smoke-test the RPN end to end on random multi-scale feature maps.
    feature_maps = {lvl: torch.randn((3, 16, 32 + 32 * lvl, 32 + 32 * lvl)) for lvl in range(3)}
    image_batch = ImageList(torch.randn((3, 3, 512, 512)), image_sizes=[(512, 312), (512, 452), (365, 456)])
    # One fresh target dict per image in the batch.
    gt_per_image = [
        {'classes': Tensor([0, 2]), 'boxes': Tensor([[12, 32, 56, 88], [14, 25, 77, 98]])}
        for _ in range(3)
    ]
    # 3 sizes x 3 aspect ratios = 9 anchors per location, matching the head.
    anchor_gen = AnchorGenerator(
        sizes=((16, 32, 64), (32, 64, 128), (32, 64, 128)),
        aspect_ratios=((0.5, 1.0, 2.0),) * 3,
    )
    head = RPNHead(in_channels=16, num_anchors=9)

    rpn = RegionProposalNetwork(
        anchor_generator=anchor_gen,
        head=head,
        fg_iou_thresh=0.7,
        bg_iou_thresh=0.3,
        batch_size_per_image=256,
        positive_fraction=0.5,
        pre_nms_top_n={'training': 2000, 'testing': 1000},
        post_nms_top_n={'training': 2000, 'testing': 1000},
        nms_thresh=0.7,
        score_thresh=0.,
    )
    rpn(image_batch, feature_maps, gt_per_image)
