import torch
import torchvision
from torch.nn import functional as F
from torch import nn
from network_files import boxes as box_ops
from network_files import det_utils
from torch.jit.annotations import List, Optional, Dict, Tuple
from torch import Tensor
from network_files.image_list import ImageList
import pdb

@torch.jit.unused
def _onnx_get_num_anchors_and_pre_nms_top_n(ob, orig_pre_nms_top_n):
    # type: (Tensor, int) -> Tuple[int, int]
    from torch.onnx import operators
    num_anchors = operators.shape_as_tensor(ob)[1].unsqueeze(0)
    # TODO : remove cast to IntTensor/num_anchors.dtype when
    #        ONNX Runtime version is updated with ReduceMin int64 support
    pre_nms_top_n = torch.min(torch.cat(
        (torch.tensor([orig_pre_nms_top_n], dtype=num_anchors.dtype),
         num_anchors), 0).to(torch.int32)).to(num_anchors.dtype)

    return num_anchors, pre_nms_top_n


class AnchorsGenerator(nn.Module):
    """
    Module that generates anchors for a set of feature maps and
    image sizes.

    The module supports computing anchors at multiple sizes and aspect ratios
    per feature map.

    sizes and aspect_ratios should have the same number of elements, and it
    should correspond to the number of feature maps.

    sizes[i] and aspect_ratios[i] can have an arbitrary number of elements,
    and AnchorsGenerator will output a set of sizes[i] * aspect_ratios[i]
    anchors per spatial location for feature map i.

    Arguments:
        sizes (Tuple[Tuple[int]]): anchor scales (sqrt of area) per feature map
        aspect_ratios (Tuple[Tuple[float]]): anchor h/w ratios per feature map
    """

    __annotations__ = {
        "cell_anchors": Optional[List[torch.Tensor]],
        "_cache": Dict[str, List[torch.Tensor]]
    }

    def __init__(self, sizes=(128, 256, 512), aspect_ratios=(0.5, 1.0, 2.0)):
        super(AnchorsGenerator, self).__init__()

        # Normalize flat tuples into one tuple per feature map, so a single
        # set of sizes/ratios can be passed for a single-level backbone.
        if not isinstance(sizes[0], (list, tuple)):
            sizes = tuple((s,) for s in sizes)
        if not isinstance(aspect_ratios[0], (list, tuple)):
            aspect_ratios = (aspect_ratios,) * len(sizes)

        assert len(sizes) == len(aspect_ratios)

        self.sizes = sizes
        self.aspect_ratios = aspect_ratios
        # Per-level zero-centered anchor templates; built lazily in
        # set_cell_anchors once dtype/device are known.
        self.cell_anchors = None
        # Cache of grid anchors keyed by str(grid_sizes) + str(strides).
        self._cache = {}

    def generate_anchors(self, scales, aspect_ratios, dtype=torch.float32, device="cpu"):
        # type: (List[int], List[float], int, Device) -> Tensor
        """
        Build the zero-centered anchor templates for one feature level.

        Arguments:
            scales: sqrt(anchor_area) for each anchor size
            aspect_ratios: h/w ratio for each anchor shape
            dtype: output dtype
            device: output device (cpu/gpu)

        Returns:
            Tensor[len(ratios) * len(scales), 4]: (xmin, ymin, xmax, ymax)
            corner offsets relative to an anchor centered at (0, 0),
            rounded to the nearest integer.
        """
        scales = torch.as_tensor(scales, dtype=dtype, device=device)
        aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)
        # For area s^2 and ratio r = h/w: h = s * sqrt(r), w = s / sqrt(r).
        h_ratios = torch.sqrt(aspect_ratios)
        w_ratios = 1.0 / h_ratios

        # Outer product: one (w, h) pair per (ratio, scale) combination.
        # number of elements is len(ratios) * len(scales)
        ws = (w_ratios[:, None] * scales[None, :]).view(-1)
        hs = (h_ratios[:, None] * scales[None, :]).view(-1)

        # Half-sizes on each side of the (0, 0) center give the corners.
        # shape: [len(ratios) * len(scales), 4]
        base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2

        return base_anchors.round()

    def set_cell_anchors(self, dtype, device):
        # type: (int, Device) -> None
        """Build (or reuse) the per-level anchor templates on `device`."""
        if self.cell_anchors is not None:
            cell_anchors = self.cell_anchors
            assert cell_anchors is not None
            # suppose that all anchors have the same device
            # which is a valid assumption in the current state of the codebase
            if cell_anchors[0].device == device:
                return

        # One template tensor per feature level, all centered at (0, 0).
        cell_anchors = [
            self.generate_anchors(sizes, aspect_ratios, dtype, device)
            for sizes, aspect_ratios in zip(self.sizes, self.aspect_ratios)
        ]
        self.cell_anchors = cell_anchors

    def num_anchors_per_location(self):
        """Number of anchors predicted at each spatial location, per level."""
        return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]

    def grid_anchors(self, grid_sizes, strides):
        # type: (List[List[int]], List[List[Tensor]]) -> List[Tensor]
        """
        Map the anchor templates onto the original image for every cell of
        every feature map.

        Args:
            grid_sizes: (height, width) of each prediction feature map
            strides: pixels on the original image per feature-map step

        Returns:
            List[Tensor[all_num_anchors, 4]]: one tensor of absolute
            (xmin, ymin, xmax, ymax) anchors per feature level.
        """
        anchors = []
        cell_anchors = self.cell_anchors
        assert cell_anchors is not None

        # One iteration per prediction feature level.
        for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
            grid_height, grid_width = size
            stride_height, stride_width = stride
            device = base_anchors.device

            # x/y coordinates of every cell center on the original image.
            # shape: [grid_width] / [grid_height]
            shifts_x = torch.arange(0, grid_width, dtype=torch.float32, device=device) * stride_width
            shifts_y = torch.arange(0, grid_height, dtype=torch.float32, device=device) * stride_height

            # Full coordinate grids, shape [grid_height, grid_width] each.
            # NOTE(review): relies on meshgrid's default "ij" indexing;
            # newer torch versions warn unless indexing= is passed explicitly.
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)

            # Per-cell offsets for (xmin, ymin, xmax, ymax).
            # shape: [grid_width * grid_height, 4]
            shifts = torch.stack([shift_x, shift_y, shift_x, shift_y], dim=1)

            # Broadcast-add every zero-centered template to every cell offset:
            # [cells, 1, 4] + [1, templates, 4] -> [cells, templates, 4]
            shifts_anchor = shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)
            anchors.append(shifts_anchor.reshape(-1, 4))

        return anchors  # List[Tensor(all_num_anchors, 4)]

    def cached_grid_anchors(self, grid_sizes, strides):
        # type: (List[List[int]], List[List[Tensor]]) -> List[Tensor]
        """Memoized wrapper around grid_anchors, keyed by sizes + strides."""
        key = str(grid_sizes) + str(strides)
        if key in self._cache:
            return self._cache[key]
        anchors = self.grid_anchors(grid_sizes, strides)
        self._cache[key] = anchors
        return anchors

    def forward(self, image_list, feature_maps):
        # type: (ImageList, List[Tensor]) -> List[Tensor]
        """
        Arguments:
            image_list: holds the batched image tensor (`tensors`) and the
                per-image resized sizes (`image_sizes`)
            feature_maps: one tensor per prediction level (a single-output
                backbone yields a list of length 1)

        Returns:
            List[Tensor]: per-image anchors, each of shape [total_anchors, 4]
        """
        # (height, width) of every prediction feature map
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]

        # (height, width) of the batched input images
        image_size = image_list.tensors.shape[-2:]

        dtype, device = feature_maps[0].dtype, feature_maps[0].device

        # One step on a feature map corresponds to `stride` pixels on the
        # original image; integer floor division matches the original
        # truncating behavior for positive sizes.
        strides = [[torch.tensor(image_size[0] // g[0], dtype=torch.int64, device=device),
                    torch.tensor(image_size[1] // g[1], dtype=torch.int64, device=device)] for g in grid_sizes]

        # Build (or reuse) the zero-centered anchor templates.
        self.set_cell_anchors(dtype, device)

        # All anchors mapped onto the original image, one tensor per level
        # (computed once and cached for identical grid/stride combinations).
        anchors_over_all_feature_maps = self.cached_grid_anchors(grid_sizes, strides)

        # Every image in the batch shares the same anchor layout; concatenate
        # the per-level anchors into one fresh tensor per image.
        anchors = [torch.cat(anchors_over_all_feature_maps)
                   for _ in range(len(image_list.image_sizes))]
        # Clear the cache in case that memory leaks.
        self._cache.clear()
        return anchors


class RPNHead(nn.Module):
    """
    RPN head: a shared 3x3 "sliding window" convolution followed by two
    sibling 1x1 convolutions predicting objectness scores and bbox
    regression deltas.

    Arguments:
        in_channels: number of channels of the input feature
        num_anchors: number of anchors predicted per spatial location
    """

    def __init__(self, in_channels, num_anchors):
        super(RPNHead, self).__init__()
        # 3x3 sliding window; input and output channel counts both equal
        # the backbone's feature channel count.
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
        # Objectness branch (foreground vs background only). Some references
        # use 2K output channels for the binary classification; this follows
        # the PyTorch implementation which uses K channels (one logit per
        # anchor is sufficient for two classes).
        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
        # Regression branch: 4 deltas per anchor, in order
        # (d_x, d_y, d_w, d_h) — center offsets then width/height scalings.
        self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)

        # Parameter initialization: nn.Module.children() yields the three
        # conv layers defined above.
        for layer in self.children():
            if isinstance(layer, nn.Conv2d):
                # Weights from a normal distribution with mean 0, std 0.01.
                torch.nn.init.normal_(layer.weight, std=0.01)
                # Biases to constant zero.
                torch.nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]
        """
        Run the head on every backbone feature map.

        Args:
            x: one feature tensor per prediction level

        Returns:
            (logits, bbox_reg): per-level objectness logits and per-level
            box regression deltas, each a List[Tensor].
        """
        logits = []
        bbox_reg = []
        # x is a List[Tensor], one entry per feature level.
        for feature in x:
            # Shared 3x3 sliding-window convolution.
            t = F.relu(self.conv(feature))
            # Both branches consume the shared sliding-window output.
            logits.append(self.cls_logits(t))
            bbox_reg.append(self.bbox_pred(t))
        return logits, bbox_reg


def permute_and_flatten(layer, N, A, C, H, W):
    # type: (Tensor, int, int, int, int, int) -> Tensor
    """
    调整tensor顺序，并进行reshape
    Args:
        layer: 预测特征层上预测的目标概率或bboxes regression参数
        N: batch_size
        A: anchors_num_per_position
        C: classes_num or 4(bbox coordinate)
        H: height
        W: width

    Returns:
        layer: 调整tensor顺序，并reshape后的结果[N, -1, C]
    """
    # view和reshape功能是一样的，先展平所有元素在按照给定shape排列
    # ** view函数只能用于内存中连续存储的tensor，permute等操作会使tensor在内存中变得不再连续，此时就不能再调用view函数 **
    # ** reshape则不需要依赖目标tensor是否在内存中是连续的 **
    # [batch_size, anchors_num_per_position * (C or 4), height, width]
    layer = layer.view(N, -1, C,  H, W)
    # 调换tensor维度 permute中的参数表示原始状态下layer(调用的tensor)的维度编号，编号从0开始
    ## eg. 0=N 2=C 3=H 4=W
    ## 也就是从[N, -1, C,  H, W] -> [N, H, W, -1, C]
    layer = layer.permute(0, 3, 4, 1, 2)  # [N, H, W, -1, C]
    # 这步操作之后 layer = Tensor(shape=[batch_size, num_anchors, C])
    layer = layer.reshape(N, -1, C)
    return layer


def concat_box_prediction_layers(box_cls, box_regression):
    # type: (List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
    """
    Reorder and reshape the per-level predictions so they line up with the
    concatenated anchor list.

    Args:
        box_cls: per-level objectness predictions, each [N, A*C, H, W]
        box_regression: per-level bbox regression predictions,
            each [N, A*4, H, W]

    Returns:
        box_cls (Tensor): [N * total_anchors, C]
        box_regression (Tensor): [N * total_anchors, 4]
        where total_anchors sums the anchors of all feature levels.
    """
    # Flattened per-level class scores.
    box_cls_flattened = []
    # Flattened per-level box regression parameters.
    box_regression_flattened = []

    # One iteration per prediction feature level; a single-output backbone
    # loops exactly once.
    for box_cls_per_level, box_regression_per_level in zip(box_cls, box_regression):
        # [batch_size, anchors_num_per_position * classes_num, height, width]
        # In the RPN, classes_num C is 1 (objectness only).
        N, AxC, H, W = box_cls_per_level.shape
        # [batch_size, anchors_num_per_position * 4, height, width]
        Ax4 = box_regression_per_level.shape[1]
        # anchors per spatial position
        A = Ax4 // 4
        # classes per anchor
        C = AxC // A

        # Flatten to [N, num_anchors_level, C] so predictions can later be
        # combined with the matching anchors to produce proposals.
        box_cls_per_level = permute_and_flatten(box_cls_per_level, N, A, C, H, W)
        box_cls_flattened.append(box_cls_per_level)

        # Same treatment for the regression deltas: -> [N, num_anchors_level, 4]
        box_regression_per_level = permute_and_flatten(box_regression_per_level, N, A, 4, H, W)
        box_regression_flattened.append(box_regression_per_level)

    # Concatenate the levels along the anchor axis (dim 1), then fold the
    # batch axis into it:
    #   box_cls:        [N, total, C] --flatten(0, -2)--> [N * total, C]
    #   box_regression: [N, total, 4] --reshape-------->  [N * total, 4]
    box_cls = torch.cat(box_cls_flattened, dim=1).flatten(0, -2)  # start_dim, end_dim
    box_regression = torch.cat(box_regression_flattened, dim=1).reshape(-1, 4)
    return box_cls, box_regression


class RegionProposalNetwork(torch.nn.Module):
    """
    Implements Region Proposal Network (RPN).

    Arguments:
        anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        head (nn.Module): module that computes the objectness and regression deltas
        fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        pre_nms_top_n (Dict[str]): number of proposals to keep before applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        post_nms_top_n (Dict[str]): number of proposals to keep after applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        nms_thresh (float): NMS threshold used for postprocessing the RPN proposals

    """
    ## 对初始化函数参数进行的注释，原则上可有可无，有了有助于理解
    __annotations__ = {
        'box_coder': det_utils.BoxCoder,
        'proposal_matcher': det_utils.Matcher,
        'fg_bg_sampler': det_utils.BalancedPositiveNegativeSampler,
        'pre_nms_top_n': Dict[str, int],
        'post_nms_top_n': Dict[str, int],
    }

    def __init__(self, anchor_generator, head,
                 fg_iou_thresh, bg_iou_thresh,
                 batch_size_per_image, positive_fraction,
                 pre_nms_top_n, post_nms_top_n, nms_thresh):
        """
        Arguments:
            anchor_generator: AnchorsGenerator that maps feature maps and the
                original image onto per-image anchors
            head: RPNHead predicting objectness and box regression per anchor
            fg_iou_thresh: anchors with IoU(anchor, GT) above this count as
                positive samples when computing the RPN loss
            bg_iou_thresh: anchors with IoU below this count as negative
                samples (anchors between the two thresholds are discarded)
            batch_size_per_image: number of anchors sampled per image for
                the loss computation
            positive_fraction: fraction of sampled anchors that should be
                positive
            pre_nms_top_n: proposals kept per feature level before NMS;
                dict with 'training' and 'testing' keys
            post_nms_top_n: proposals kept after NMS; same dict layout
            nms_thresh: IoU threshold used during NMS
        """
        super(RegionProposalNetwork, self).__init__()
        self.anchor_generator = anchor_generator
        self.head = head
        self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))

        # IoU between two sets of boxes; used during training only.
        self.box_similarity = box_ops.box_iou

        # Classifies anchors as positive / negative / discarded by IoU.
        # With allow_low_quality_matches=True, each GT box's best-matching
        # anchor is kept as a positive even if its IoU is below
        # fg_iou_thresh; otherwise such anchors may be dropped or counted
        # as negatives.
        self.proposal_matcher = det_utils.Matcher(
            fg_iou_thresh,  # IoU > fg_iou_thresh (e.g. 0.7) -> positive
            bg_iou_thresh,  # IoU < bg_iou_thresh (e.g. 0.3) -> negative
            allow_low_quality_matches=True
        )
        # Samples batch_size_per_image anchors per image for the loss,
        # targeting positive_fraction positives.
        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(
            batch_size_per_image, positive_fraction  # e.g. 256, 0.5
        )

        # use during testing
        self._pre_nms_top_n = pre_nms_top_n
        self._post_nms_top_n = post_nms_top_n
        self.nms_thresh = nms_thresh
        # Minimum box side used when filtering tiny proposals.
        self.min_size = 1e-3

    def pre_nms_top_n(self):
        """Return the pre-NMS proposal budget for the current train/eval mode."""
        mode = 'training' if self.training else 'testing'
        return self._pre_nms_top_n[mode]

    def post_nms_top_n(self):
        """Return the post-NMS proposal budget for the current train/eval mode."""
        mode = 'training' if self.training else 'testing'
        return self._post_nms_top_n[mode]

    def assign_targets_to_anchors(self, anchors, targets):
        # type: (List[Tensor], List[Dict[str, Tensor]]) -> Tuple[List[Tensor], List[Tensor]]
        """
        Match each anchor to its best ground-truth box and label it as
        positive, background, or discarded.

        Args:
            anchors (List[Tensor]): per-image anchors, each [num_anchors, 4]
            targets (List[Dict[str, Tensor]]): per-image targets; the
                "boxes" entry holds the GT box coordinates

        Returns:
            labels: per-anchor labels (1.0 = positive, 0.0 = background,
                -1.0 = discarded); the RPN only distinguishes foreground
                from background, so every positive has label 1
            matched_gt_boxes: the GT box matched to each anchor
        """
        # Per-image label tensors (same length as that image's anchor count):
        # 1 for positives, 0 for negatives, -1 for discarded anchors.
        labels = []
        # Per-image GT box assigned to each anchor.
        matched_gt_boxes = []
        # Iterate over the images in the batch:
        # anchors_per_image - all anchors of one image,
        # targets_per_image - that image's annotations ("boxes" = GT coords).
        for anchors_per_image, targets_per_image in zip(anchors, targets):
            gt_boxes = targets_per_image["boxes"]
            # Degenerate case: an image without any GT boxes. Everything is
            # background; normally execution goes straight to the else branch.
            if gt_boxes.numel() == 0:
                device = anchors_per_image.device
                matched_gt_boxes_per_image = torch.zeros(anchors_per_image.shape, dtype=torch.float32, device=device)
                labels_per_image = torch.zeros((anchors_per_image.shape[0],), dtype=torch.float32, device=device)
            else:
                # IoU between every GT box and every anchor:
                # match_quality_matrix.shape = [num_gt_boxes, num_anchors].
                match_quality_matrix = box_ops.box_iou(gt_boxes, anchors_per_image)
                # Sample selection as in the paper:
                #   1. IoU with a GT box > fg threshold (e.g. 0.7) -> positive
                #   2. IoU < bg threshold (e.g. 0.3)               -> negative
                #   3. in between                                   -> discarded
                # matched_idxs has one entry per anchor: the index of the
                # matched GT box, or -1 (below low threshold) / -2 (between
                # thresholds) as encoded by the Matcher.
                matched_idxs = self.proposal_matcher(match_quality_matrix)
                # get the targets corresponding GT for each proposal
                # NB: need to clamp the indices because we can have a single
                # GT in the image, and matched_idxs can be -2, which goes
                # out of bounds
                # (box regression loss later uses only the positives)
                matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)]

                # Boolean per anchor: True for positives, False otherwise...
                labels_per_image = matched_idxs >= 0
                # ...then cast so positives become 1.0 and the rest 0.0.
                labels_per_image = labels_per_image.to(dtype=torch.float32)

                # background (negative examples)
                bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD  # -1
                labels_per_image[bg_indices] = 0.0

                # discard indices that are between thresholds
                inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS  # -2
                labels_per_image[inds_to_discard] = -1.0
                # At this point labels_per_image holds 1 for positives,
                # 0 for negatives, -1 for discarded anchors.

            labels.append(labels_per_image)
            matched_gt_boxes.append(matched_gt_boxes_per_image)
        return labels, matched_gt_boxes

    def _get_top_n_idx(self, objectness, num_anchors_per_level):
        # type: (Tensor, List[int]) -> Tensor
        """
        Collect, for every feature level, the indices of the anchors with
        the top `pre_nms_top_n` objectness scores.

        Args:
            objectness: [batch_size, total_anchors] predicted scores, with
                the levels laid out consecutively along dim 1
            num_anchors_per_level: anchors per feature level (len = #levels)

        Returns:
            Tensor[batch_size, kept]: indices into the concatenated
            (all-level) anchor axis.
        """
        top_idx_per_level = []
        # Offset of the current level's first anchor within the
        # concatenated anchor axis; stays 0 for a single-level backbone.
        offset = 0
        # split(num_anchors_per_level, 1) slices objectness back into
        # per-level chunks along dim 1, e.g. [2, 12] with [3, 4, 5]
        # yields chunks of shape [2, 3], [2, 4], [2, 5].
        for scores in objectness.split(num_anchors_per_level, 1):
            if torchvision._is_tracing():
                num_anchors, pre_nms_top_n = _onnx_get_num_anchors_and_pre_nms_top_n(scores, self.pre_nms_top_n())
            else:
                # Anchors predicted on this feature level.
                num_anchors = scores.shape[1]
                # Never request more anchors than the level provides;
                # pre_nms_top_n() depends on train/eval mode.
                pre_nms_top_n = min(self.pre_nms_top_n(), num_anchors)

            # topk returns (values, indices) sorted descending along dim 1;
            # only the indices are needed here.
            _, top_n_idx = scores.topk(pre_nms_top_n, dim=1)
            # Shift level-local indices into the global anchor numbering.
            top_idx_per_level.append(top_n_idx + offset)
            offset += num_anchors
        # Concatenate the kept indices of all levels.
        return torch.cat(top_idx_per_level, dim=1)

    def filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_level):
        # type: (Tensor, Tensor, List[Tuple[int, int]], List[int]) -> Tuple[List[Tensor], List[Tensor]]
        """
        Filter proposals: clip to the image, drop tiny boxes, run NMS, and
        keep the top post_nms_top_n scoring boxes per image.

        Args:
            proposals: predicted box coordinates,
                Tensor[batch_size, total_anchors, 4]
            objectness: predicted objectness scores,
                Tensor[batch_size * total_anchors, 1]
            image_shapes: per-image (height, width); len = batch_size
            num_anchors_per_level: anchors per feature level; len = #levels

        Returns:
            (final_boxes, final_scores): per-image kept boxes
            (each [<= post_nms_top_n, 4]) and their scores.
        """
        # batch size
        num_images = proposals.shape[0]
        device = proposals.device

        # do not backprop through objectness
        objectness = objectness.detach()
        # Restore the batch axis: [batch_size, total_anchors].
        objectness = objectness.reshape(num_images, -1)

        # levels records which feature level each anchor came from: for each
        # level idx with n anchors, torch.full((n,), idx, ...) produces a
        # length-n tensor filled with that level's index. A single-level
        # backbone yields one tensor; FPN-style backbones yield several.
        levels = [torch.full((n, ), idx, dtype=torch.int64, device=device)
                  for idx, n in enumerate(num_anchors_per_level)]
        # Concatenated along dim 0, this has one entry per anchor of one
        # image (shape [total_anchors]); since the per-level anchors were
        # concatenated in the same order, position i here tells which level
        # anchor i belongs to.
        levels = torch.cat(levels, 0)

        # [total_anchors] -> [1, total_anchors] -> broadcast to
        # [batch_size, total_anchors] to match objectness.
        levels = levels.reshape(1, -1).expand_as(objectness)

        # select top_n boxes independently per level before applying nms
        # (pre_nms_top_n is the per-level budget configured at init).
        top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)

        # Row selector for advanced indexing: shape [batch_size, 1],
        # filled with each image's batch index.
        image_range = torch.arange(num_images, device=device)
        batch_idx = image_range[:, None]  # [batch_size, 1]

        # Gather the kept anchors' scores, levels, and boxes using the
        # [batch_idx, top_n_idx] index pair; each result has shape
        # [batch_size, kept_anchors].
        objectness = objectness[batch_idx, top_n_idx]
        levels = levels[batch_idx, top_n_idx]
        proposals = proposals[batch_idx, top_n_idx]

        final_boxes = []
        final_scores = []
        # Iterate over the batch (dim 0 of proposals/objectness/levels):
        # boxes - surviving proposal coordinates for one image,
        # scores - their objectness scores,
        # lvl - their source feature-level indices,
        # img_shape - that image's (height, width).
        for boxes, scores, lvl, img_shape in zip(proposals, objectness, levels, image_shapes):
            # Clamp out-of-bounds coordinates onto the image boundary.
            boxes = box_ops.clip_boxes_to_image(boxes, img_shape)
            # Indices of boxes whose width and height both exceed min_size.
            keep = box_ops.remove_small_boxes(boxes, self.min_size)
            # Keep only those boxes, with their scores and level indices.
            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]
            # Per-level NMS (batched_nms separates levels via lvl);
            # returns kept indices sorted by descending score.
            keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh)
            # Keep only the post_nms_top_n best-scoring survivors.
            keep = keep[: self.post_nms_top_n()]
            # Materialize the final boxes and scores from the indices.
            boxes, scores = boxes[keep], scores[keep]
            final_boxes.append(boxes)
            final_scores.append(scores)
        # Both lists have len = batch_size; each tensor holds at most
        # post_nms_top_n entries.
        return final_boxes, final_scores

    def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets):
        # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]
        """
        Compute the RPN losses: objectness (foreground/background)
        classification loss and bbox regression loss.

        Arguments:
            objectness (Tensor): predicted foreground scores
            pred_bbox_deltas (Tensor): predicted bbox regression deltas
            labels (List[Tensor]): per-image anchor labels (1, 0, -1)
            regression_targets (List[Tensor]): per-image GT regression targets

        Returns:
            objectness_loss (Tensor): classification loss
            box_loss (Tensor): bounding-box regression loss
        """
        # Sample a balanced subset of the labeled anchors; not every positive
        # and negative takes part in the loss. Each returned list holds one
        # per-image mask tensor with 1 at the sampled positions.
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        # Concatenate the per-image masks across the batch and convert them
        # to flat indices of the sampled positives / negatives.
        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)

        # The classification loss does not need to distinguish positives from
        # negatives any further — the sampling already balanced them — so the
        # two index sets are simply concatenated.
        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
        # [num_anchors, 1] -> [num_anchors]; order is unchanged.
        objectness = objectness.flatten()
        # Merge the per-image labels and regression targets into one tensor
        # each, matching the flattened predictions.
        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)

        # Smooth-L1 box regression loss over the positive samples only
        # (negatives have no meaningful box target), normalized by the total
        # number of sampled anchors.
        box_loss = det_utils.smooth_l1_loss(
            pred_bbox_deltas[sampled_pos_inds],
            regression_targets[sampled_pos_inds],
            beta=1 / 9,
            size_average=False,
        ) / (sampled_inds.numel())

        # Binary cross-entropy with logits (applies sigmoid internally):
        # this is a two-class problem, so BCE is used instead of a softmax
        # cross-entropy — matching RPNHead's single logit per anchor.
        objectness_loss = F.binary_cross_entropy_with_logits(
            objectness[sampled_inds], labels[sampled_inds]
        )

        return objectness_loss, box_loss

    def forward(self,
                images,        # type: ImageList
                features,      # type: Dict[str, Tensor]
                targets=None   # type: Optional[List[Dict[str, Tensor]]]
                ):
        # type: (...) -> Tuple[List[Tensor], Dict[str, Tensor]]
        """
        Run the RPN over a batch of images.

        Arguments:
            images (ImageList): batched images plus their original
                (pre-padding) sizes.
            features (Dict[str, Tensor]): ordered feature maps produced by
                the backbone, each shaped [batch, C, H, W]. A plain backbone
                yields one entry; an FPN backbone yields several
                (e.g. keys "0".."3" and "pool").
            targets (List[Dict[str, Tensor]], optional): ground-truth boxes,
                required in training mode.

        Returns:
            boxes (List[Tensor]): per-image proposal boxes after decoding,
                clipping, small-box removal and NMS.
            losses (Dict[str, Tensor]): objectness + box-regression losses
                while training; empty dict at inference time.
        """
        # The dict is ordered, so this preserves the feature-level order.
        feature_maps = list(features.values())

        # Per level: objectness logits [batch, K, H, W] and box deltas
        # [batch, 4*K, H, W], where K is anchors per spatial cell.
        objectness, pred_bbox_deltas = self.head(feature_maps)

        # One [num_anchors, 4] tensor per image in the batch.
        anchors = self.anchor_generator(images, feature_maps)
        num_images = len(anchors)

        # Anchors per level = K * H * W (H * W cells, K anchors each);
        # needed later to split scores back out per feature level.
        num_anchors_per_level = [
            o[0].shape[0] * o[0].shape[1] * o[0].shape[2] for o in objectness
        ]

        # Flatten across levels: objectness -> [batch*total_anchors, 1],
        # deltas -> [batch*total_anchors, 4].
        objectness, pred_bbox_deltas = concat_box_prediction_layers(objectness,
                                                                    pred_bbox_deltas)

        # Decode deltas onto anchors to get proposal coordinates. The deltas
        # are detached: Faster R-CNN does not backprop through proposals.
        proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
        # Regroup per image: [batch, num_anchors, 4].
        proposals = proposals.view(num_images, -1, 4)

        # Clip to image, drop tiny boxes, NMS, keep top post_nms_top_n.
        boxes, scores = self.filter_proposals(proposals, objectness,
                                              images.image_sizes,
                                              num_anchors_per_level)

        losses = {}
        if self.training:
            # Loss is only computed with ground truth available.
            assert targets is not None

            # labels: 1 = foreground, 0 = background, -1 = discarded;
            # matched_gt_boxes: the GT box matched to each anchor.
            labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)
            # Ground-truth regression targets, shape [num_anchors, 4],
            # used as labels for the box-regression loss.
            regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)
            loss_objectness, loss_rpn_box_reg = self.compute_loss(
                objectness, pred_bbox_deltas, labels, regression_targets
            )
            losses = {
                "loss_objectness": loss_objectness,
                "loss_rpn_box_reg": loss_rpn_box_reg,
            }
        return boxes, losses
