import torch
import math
from torch.jit.annotations import List, Tuple
from torch import Tensor
import torchvision


# TODO: https://github.com/pytorch/pytorch/issues/26727
def zeros_like(tensor, dtype):
    # type: (Tensor, int) -> Tensor
    """Return an all-zero tensor shaped like *tensor* but with *dtype*.

    Workaround helper (see the TODO link above): every other property —
    layout, device, and the pinned-memory flag — is copied from *tensor*.
    """
    pinned = tensor.is_pinned()
    return torch.zeros_like(
        tensor,
        dtype=dtype,
        layout=tensor.layout,
        device=tensor.device,
        pin_memory=pinned,
    )


@torch.jit.script
class BalancedPositiveNegativeSampler(object):
    """
    This class samples batches, ensuring that they contain a fixed proportion of positives
    """

    def __init__(self, batch_size_per_image, positive_fraction):
        # type: (int, float)
        """
        Arguments:
            batch_size_per_image (int): total number of samples to select per image
            positive_fraction (float): desired fraction of positive samples
        """
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction

    def __call__(self, matched_idxs):
        # type: (List[Tensor])
        """
        Arguments:
            matched idxs: list of tensors containing -1, 0 or positive values.
                Each tensor corresponds to a specific image.
                -1 values are ignored, 0 are considered as negatives and > 0 as
                positives.

        Returns:
            pos_idx (list[tensor])
            neg_idx (list[tensor])

        Returns two lists of binary masks for each image.
        The first list contains the positive elements that were selected,
        and the second list the negative example.
        """
        pos_idx = []
        neg_idx = []
        # Iterate over each image's matched_idxs.
        # matched_idxs labels every anchor: positive >= 1, negative == 0, discarded == -1.
        for matched_idxs_per_image in matched_idxs:
            # In practice the overwhelming majority of anchors are negatives:
            # anchor counts easily reach the hundreds of thousands, while even a
            # crowded image only contains on the order of a hundred objects.

            # indices of positive samples (label >= 1)
            positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
            # indices of negative samples (label == 0)
            negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)

            # desired number of positive samples
            num_pos = int(self.batch_size_per_image * self.positive_fraction)
            # protect against not enough positive examples
            # usually there are fewer positives than requested, so take the smaller count
            num_pos = min(positive.numel(), num_pos)
            # fill the remainder of the batch with negatives
            num_neg = self.batch_size_per_image - num_pos
            # protect against not enough negative examples
            num_neg = min(negative.numel(), num_neg)

            # randomly select positive and negative examples
            # Returns a random permutation of integers from 0 to n - 1.
            # Slicing the permutation yields a uniformly random subset.
            # Note perm1/perm2 index into positive/negative, i.e. they are
            # indices of indices.
            perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
            perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]

            # gather the actual anchor indices of the sampled positives/negatives
            # (positive/negative already hold anchor indices; perm1/perm2 select among them)
            pos_idx_per_image = positive[perm1]
            neg_idx_per_image = negative[perm2]

            # create binary mask from indices
            # zero-filled templates with the same shape as matched_idxs_per_image
            pos_idx_per_image_mask = zeros_like(
                matched_idxs_per_image, dtype=torch.uint8
            )
            neg_idx_per_image_mask = zeros_like(
                matched_idxs_per_image, dtype=torch.uint8
            )

            # mark the sampled anchor positions (indices into the anchor set) with 1
            pos_idx_per_image_mask[pos_idx_per_image] = 1
            neg_idx_per_image_mask[neg_idx_per_image] = 1

            pos_idx.append(pos_idx_per_image_mask)
            neg_idx.append(neg_idx_per_image_mask)

        return pos_idx, neg_idx


@torch.jit.script
def encode_boxes(reference_boxes, proposals, weights):
    # type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
    """
    Encode a set of proposals with respect to some
    reference boxes.

    Arguments:
        reference_boxes (Tensor): reference boxes (gt), (x1, y1, x2, y2) rows
        proposals (Tensor): boxes to be encoded (anchors), same layout
        weights (Tensor): per-coordinate weights (wx, wy, ww, wh)

    Returns:
        Tensor with one (dx, dy, dw, dh) regression target per row.
    """
    # Unpack the weights into scalar tensors up front to keep the element-wise
    # expressions below JIT-fusion friendly.
    wx, wy, ww, wh = weights[0], weights[1], weights[2], weights[3]

    # Slice each anchor coordinate out as an [N, 1] column; unsqueeze keeps the
    # trailing dimension so everything concatenates cleanly at the end.
    anchors_x1 = proposals[:, 0].unsqueeze(1)
    anchors_y1 = proposals[:, 1].unsqueeze(1)
    anchors_x2 = proposals[:, 2].unsqueeze(1)
    anchors_y2 = proposals[:, 3].unsqueeze(1)

    # Same slicing for the matched ground-truth boxes.
    gt_x1 = reference_boxes[:, 0].unsqueeze(1)
    gt_y1 = reference_boxes[:, 1].unsqueeze(1)
    gt_x2 = reference_boxes[:, 2].unsqueeze(1)
    gt_y2 = reference_boxes[:, 3].unsqueeze(1)

    # Anchor sizes (width = x2 - x1, height = y2 - y1) and center coordinates
    # (center = min-corner + half the size).
    anchors_w = anchors_x2 - anchors_x1
    anchors_h = anchors_y2 - anchors_y1
    anchors_cx = anchors_x1 + 0.5 * anchors_w
    anchors_cy = anchors_y1 + 0.5 * anchors_h

    # Likewise for the ground-truth boxes.
    gt_w = gt_x2 - gt_x1
    gt_h = gt_y2 - gt_y1
    gt_cx = gt_x1 + 0.5 * gt_w
    gt_cy = gt_y1 + 0.5 * gt_h

    # Standard Faster R-CNN box encoding: normalized center deltas plus
    # log-space size ratios.
    targets_dx = wx * (gt_cx - anchors_cx) / anchors_w
    targets_dy = wy * (gt_cy - anchors_cy) / anchors_h
    targets_dw = ww * torch.log(gt_w / anchors_w)
    targets_dh = wh * torch.log(gt_h / anchors_h)

    # Concatenate the four [N, 1] columns into the [N, 4] target tensor.
    return torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)


@torch.jit.script
class BoxCoder(object):
    """
    This class encodes and decodes a set of bounding boxes into
    the representation used for training the regressors.
    """

    def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
        # type: (Tuple[float, float, float, float], float)
        """
        Arguments:
            weights (4-element tuple): per-coordinate weights (wx, wy, ww, wh)
            bbox_xform_clip (float): upper clamp on dw/dh before torch.exp in
                decode_single, preventing exponent overflow
        """
        self.weights = weights
        self.bbox_xform_clip = bbox_xform_clip

    def encode(self, reference_boxes, proposals):
        # type: (List[Tensor], List[Tensor])
        """
        Compute the regression targets of each anchor w.r.t. its matched GT box.
        Args:
            reference_boxes: List[Tensor] gt_boxes — matched GT box coordinates per anchor
            proposals: List[Tensor] anchors — anchor coordinates

        Returns: regression parameters

        """
        # Record how many boxes each image contributes so the concatenated
        # result can be split back per image afterwards.
        # reference_boxes and proposals share the same structure.
        boxes_per_image = [len(b) for b in reference_boxes]
        # Concatenate the per-image matched-GT coordinates ...
        reference_boxes = torch.cat(reference_boxes, dim=0)
        # ... and the per-image anchor coordinates, processing the whole batch at once.
        proposals = torch.cat(proposals, dim=0)

        # targets_dx, targets_dy, targets_dw, targets_dh
        targets = self.encode_single(reference_boxes, proposals)
        # Split the concatenated regression targets back into one tensor per image.
        return targets.split(boxes_per_image, 0)

    def encode_single(self, reference_boxes, proposals):
        """
        Encode a set of proposals with respect to some
        reference boxes

        Arguments:
            reference_boxes (Tensor): reference boxes — matched GT box coordinates for
                every anchor in the batch, concatenated across images
            proposals (Tensor): boxes to be encoded — anchor coordinates for the whole
                batch, concatenated across images
        """
        # target dtype
        dtype = reference_boxes.dtype
        # target device
        device = reference_boxes.device
        # Convert the weights tuple to a tensor on the same dtype/device.
        # The weights rescale the x/y/w/h regression targets; they default to
        # (1, 1, 1, 1), in which case they have no effect.
        weights = torch.as_tensor(self.weights, dtype=dtype, device=device)
        # Compute the true offsets — used as the ground truth for the box-regression loss.
        targets = encode_boxes(reference_boxes, proposals, weights)

        return targets

    def decode(self, rel_codes, boxes):
        # type: (Tensor, List[Tensor])
        """
        Apply predicted regression parameters to the anchors.

        Args:
            rel_codes: bbox regression parameters     Tensor[shape=[batch_size * total_anchor, 4]]
            boxes: anchors      List[Tensor[shape=(total_anchor, 4)]] len(boxes) = batch_size

        Returns:
            Tensor[shape=(batch_size * total_anchor, -1, 4)] of decoded boxes

        """
        # sanity-check the argument types
        assert isinstance(boxes, (list, tuple))
        # if isinstance(rel_codes, (list, tuple)):
        #     rel_codes = torch.cat(rel_codes, dim=0)
        assert isinstance(rel_codes, torch.Tensor)
        # number of boxes generated for each image
        boxes_per_image = [b.size(0) for b in boxes]
        # Concatenate along dim 0, merging the whole batch into one tensor.
        # This mirrors the cat applied upstream to the head predictions, so
        # concat_boxes lines up row for row with rel_codes:
        # Tensor[shape=(batch_size * total_anchor, 4)].
        concat_boxes = torch.cat(boxes, dim=0)

        # total number of anchors in the batch
        box_sum = 0
        for val in boxes_per_image:
            box_sum += val
        # Apply the predicted bbox regression parameters to the anchors to get
        # the predicted box coordinates.
        pred_boxes = self.decode_single(
            rel_codes.reshape(box_sum, -1), concat_boxes
        )
        return pred_boxes.reshape(box_sum, -1, 4)

    def decode_single(self, rel_codes, boxes):
        """
        From a set of original boxes and encoded relative box offsets,
        get the decoded boxes.

        Arguments:
            rel_codes (Tensor): encoded boxes (bbox regression parameters)
            boxes (Tensor): reference boxes (anchors)

        rel_codes=Tensor[shape=[batch_size * total_anchor, 4]]
        boxes=Tensor[shape=[batch_size * total_anchor, 4]]
        """
        # bring boxes to the same dtype as rel_codes
        boxes = boxes.to(rel_codes.dtype)

        # boxes holds (xmin, ymin, xmax, ymax) corners; convert to center + size form
        widths = boxes[:, 2] - boxes[:, 0]   # anchor widths
        heights = boxes[:, 3] - boxes[:, 1]  # anchor heights
        ctr_x = boxes[:, 0] + 0.5 * widths   # anchor center x
        ctr_y = boxes[:, 1] + 0.5 * heights  # anchor center y

        # self.weights defaults to (1, 1, 1, 1), in which case the divisions below
        # have no effect.
        # Why the strided slice 0::4 instead of [:, 0]? Because [:, 0] would drop
        # the trailing dimension and yield shape [batch_size * total_anchor], while
        # [:, 0::4] keeps it, yielding [batch_size * total_anchor, 1] — which is
        # what the broadcasting against widths[:, None] below expects.
        # dx/dy/dw/dh all share shape [batch_size * total_anchor, 1].
        wx, wy, ww, wh = self.weights  # all 1 by default
        dx = rel_codes[:, 0::4] / wx   # predicted center-x regression parameters
        dy = rel_codes[:, 1::4] / wy   # predicted center-y regression parameters
        dw = rel_codes[:, 2::4] / ww   # predicted width regression parameters
        dh = rel_codes[:, 3::4] / wh   # predicted height regression parameters

        # limit max value, prevent sending too large values into torch.exp()
        # torch.clamp caps the values; the default cap math.log(1000. / 16) comes
        # from the reference implementation and guards against exponent overflow.
        dw = torch.clamp(dw, max=self.bbox_xform_clip)
        dh = torch.clamp(dh, max=self.bbox_xform_clip)

        # Invert the encoding transform: combine the predicted regression values
        # with the anchor geometry to get the final proposal centers and sizes.
        # widths[:, None] adds a trailing dimension so the 1-D anchor stats
        # broadcast against the [N, 1] shaped dx/dy/dw/dh.
        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]

        # Convert center + size back to (xmin, ymin, xmax, ymax).
        # Each of the four results below has shape [batch_size * total_anchor, 1].
        # xmin = center x minus half the width
        pred_boxes1 = pred_ctr_x - torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w
        # ymin = center y minus half the height
        pred_boxes2 = pred_ctr_y - torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h
        # xmax = center x plus half the width
        pred_boxes3 = pred_ctr_x + torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w
        # ymax = center y plus half the height
        pred_boxes4 = pred_ctr_y + torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h
        # Assemble into shape [batch_size * total_anchor, 4]:
        # torch.stack with dim=2 inserts a new axis giving [N, 1, 4], and
        # flatten(1) collapses it back down to [N, 4].
        pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1)
        return pred_boxes


@torch.jit.script
class Matcher(object):
    # marker for predictions whose best IoU falls below low_threshold
    BELOW_LOW_THRESHOLD = -1
    # marker for predictions whose best IoU falls between the two thresholds
    BETWEEN_THRESHOLDS = -2

    __annotations__ = {
        'BELOW_LOW_THRESHOLD': int,
        'BETWEEN_THRESHOLDS': int,
    }

    def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
        # type: (float, float, bool)
        """
        Args:
            high_threshold (float): quality values greater than or equal to
                this value are candidate matches.
            low_threshold (float): a lower quality threshold used to stratify
                matches into three levels:
                1) matches >= high_threshold
                2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
                3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
            allow_low_quality_matches (bool): if True, produce additional matches
                for predictions that have only low-quality match candidates. See
                set_low_quality_matches_ for more details.
        """
        # value assigned when the best IoU is below low_threshold
        self.BELOW_LOW_THRESHOLD = -1
        # value assigned when the best IoU lies between low_threshold and high_threshold
        self.BETWEEN_THRESHOLDS = -2
        assert low_threshold <= high_threshold
        self.high_threshold = high_threshold
        self.low_threshold = low_threshold
        # Special note on this flag: when True, the anchor with the highest IoU
        # for each GT box is treated as a positive even if that IoU does not
        # exceed high_threshold. When False, such an anchor may be discarded or
        # counted as a negative.
        self.allow_low_quality_matches = allow_low_quality_matches

    def __call__(self, match_quality_matrix):
        """
        For every anchor, find the GT box with the highest IoU and record its
        index; anchors with iou < low_threshold get index -1, and anchors with
        low_threshold <= iou < high_threshold get index -2.
        Args:
            match_quality_matrix (Tensor[float]): an MxN tensor, containing the
            pairwise quality between M ground-truth elements and N predicted elements.

        Returns:
            matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
            [0, M - 1] or a negative value indicating that prediction i could not
            be matched.
        """
        # guard clause: an empty matrix means there is nothing to match
        if match_quality_matrix.numel() == 0:
            # empty targets or proposals not supported during training
            if match_quality_matrix.shape[0] == 0:
                raise ValueError(
                    "No ground-truth boxes available for one of the images "
                    "during training")
            else:
                raise ValueError(
                    "No proposal boxes available for one of the images "
                    "during training")

        # match_quality_matrix is M (gt) x N (predicted)
        # Max over gt elements (dim 0) to find best gt candidate for each prediction
        # Each column of the M x N matrix holds one anchor's IoU with every GT.
        # matched_vals is the column-wise max, i.e. each anchor's best IoU over all GTs;
        # matches is the index of that maximum.
        # e.g. with this toy table:
        #      anchor1  anchor2  anchor3
        #  GT1   0.2       0.3     0.9
        #  GT2   0.6       0.8     0.1
        # matched_vals is the per-column max, Tensor([0.6, 0.8, 0.9]);
        # matches is the index of the max,   Tensor([1, 1, 0]).
        matched_vals, matches = match_quality_matrix.max(dim=0)  # the dimension to reduce.
        if self.allow_low_quality_matches:
            all_matches = matches.clone()
        else:
            all_matches = None

        # Assign candidate matches with low quality to negative (unassigned) values
        # boolean mask (same shape as matched_vals) of anchors with iou < low_threshold
        below_low_threshold = matched_vals < self.low_threshold
        # boolean mask of anchors whose best iou lies in [low_threshold, high_threshold)
        between_thresholds = (matched_vals >= self.low_threshold) & (matched_vals < self.high_threshold)
        # NOTE: why overwrite the GT indices of negatives and discarded anchors?
        # Discarded anchors (-2) take no part in later computation, so -2 simply
        # marks them as such; negatives (-1) represent background with no
        # corresponding GT box, so they need no GT index either.
        '''
        为什么要单独对负样本及舍弃样本的最大iou索引单独设置
        舍弃样本不用说  不参与后面的计算 所以设置为-2 标记为舍弃样本
        负样本表示没有对应的GT box  也就是背景  不需要索引
        '''
        # anchors with iou < low_threshold get index -1: negative samples
        matches[below_low_threshold] = self.BELOW_LOW_THRESHOLD  # -1
        # anchors with iou in [low_threshold, high_threshold) get index -2: discarded samples
        matches[between_thresholds] = self.BETWEEN_THRESHOLDS    # -2

        if self.allow_low_quality_matches:
            assert all_matches is not None
            # matches: best GT index per anchor, with negatives set to -1 and
            #     discarded anchors set to -2
            # all_matches: the raw argmax matches before any thresholding
            # match_quality_matrix: the GT x anchor IoU matrix
            self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)

        return matches

    def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
        """
        Produce additional matches for predictions that have only low-quality matches.
        Specifically, for each ground-truth find the set of predictions that have
        maximum overlap with it (including ties); for each prediction in that set, if
        it is unmatched, then match it to the ground-truth with which it has the highest
        quality value.
        """
        # For each gt, find the prediction with which it has highest quality
        # Reduce over dim=1 — the opposite of the reduction in __call__ — i.e.
        # from each GT box's point of view, find its best-matching anchor.
        # e.g. with this toy table:
        #      anchor1  anchor2  anchor3
        #  GT1   0.2       0.3     0.9
        #  GT2   0.6       0.8     0.1
        # highest_quality_foreach_gt is the per-row max, Tensor([0.9, 0.8]);
        # the discarded second return value is its index, Tensor([2, 1]).
        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)  # the dimension to reduce.

        # Find highest quality match available, even if it is low, including ties
        # For each GT box, find the anchor indices achieving that max IoU; one GT
        # may tie with several anchors.
        # Compare match_quality_matrix against highest_quality_foreach_gt;
        # highest_quality_foreach_gt is 1-D while the matrix is 2-D, so
        # [:, None] lifts it to 2-D for broadcasting.
        # With the toy table above (match_quality_matrix) and
        # highest_quality_foreach_gt = Tensor([0.9, 0.8]), the result is
        # gt_pred_pairs_of_highest_quality = Tensor([[0, 2], [1, 1]]):
        # the coordinates of every position where the values are equal.
        # NOTE: its shape is not necessarily [num_gt, 2] — the value comparison
        # can match several tied anchors per GT, so it is [>=num_gt, 2].
        gt_pred_pairs_of_highest_quality = torch.nonzero(
            match_quality_matrix == highest_quality_foreach_gt[:, None]
        )
        # Example gt_pred_pairs_of_highest_quality:
        #   tensor([[    0, 39796],
        #           [    1, 32055],
        #           [    1, 32070],
        #           [    2, 39190],
        #           [    2, 40255],
        #           [    3, 40390],
        #           [    3, 41455],
        #           [    4, 45470],
        #           [    5, 45325],
        #           [    5, 46390]])
        # Each row is a (gt index, prediction index)
        # Note how gt items 1, 2, 3, and 5 each have two ties

        # gt_pred_pairs_of_highest_quality[:, 0] is the GT index (not needed here).
        # In match_quality_matrix the first index is the GT box and the second is
        # the anchor, so each returned pair is (gt index, anchor index); only the
        # anchor index (column 1) is needed.
        pre_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
        # Restore each such anchor's original best-GT index, even if its IoU is
        # below the threshold. This matters when a GT box's best anchor has
        # iou < high_threshold: the earlier thresholding may have marked it
        # discarded or negative, and this step turns it back into a positive.
        matches[pre_inds_to_update] = all_matches[pre_inds_to_update]


def smooth_l1_loss(input, target, beta: float = 1. / 9, size_average: bool = True):
    """
    Smooth L1 loss, very similar to the smooth_l1_loss from pytorch, but with
    the extra beta parameter controlling where the quadratic region ends.

    Arguments:
        input (Tensor): predictions
        target (Tensor): regression targets, same shape as input
        beta (float): transition point between the quadratic (|x| < beta) and
            linear (|x| >= beta) regimes; beta <= ~0 degrades to plain L1 loss
        size_average (bool): if True return the mean of the per-element losses,
            otherwise their sum

    Returns:
        scalar Tensor with the reduced loss
    """
    n = torch.abs(input - target)
    if beta < 1e-5:
        # Guard against (near-)zero beta: torch.where evaluates both branches,
        # so 0.5 * n ** 2 / beta would produce inf/nan (and nan gradients leak
        # through the unselected branch under autograd). With beta ~ 0 the
        # quadratic region is empty anyway, so fall back to plain L1.
        loss = n
    else:
        cond = n < beta
        loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
    if size_average:
        return loss.mean()
    return loss.sum()
