# -*- coding: utf-8 -*-
# author： woldier wong
# datetime： 3/12/24 3:15 AM
# ide： PyCharm

import math
from typing import List, Optional

import torch
from torch import nn, Tensor
from torchvision.models.detection.image_list import ImageList


class AnchorGenerator(nn.Module):
    """
    Module that generates anchors for a set of feature maps and image sizes.

    The module supports computing anchors at multiple sizes and aspect
    ratios per feature map.  This module assumes
    ``aspect ratio = height / width`` for each anchor.

    ``sizes`` and ``aspect_ratios`` should have the same number of elements,
    and that number should correspond to the number of feature maps.

    ``sizes[i]`` and ``aspect_ratios[i]`` can have an arbitrary number of
    elements, and AnchorGenerator will output a set of
    ``len(sizes[i]) * len(aspect_ratios[i])`` anchors per spatial location
    for feature map ``i``.

    Args:
        sizes (Tuple[Tuple[int]]): anchor sizes, one inner tuple per
            feature map.
        aspect_ratios (Tuple[Tuple[float]]): anchor height/width ratios,
            one inner tuple per feature map.
    """

    __annotations__ = {
        "cell_anchors": List[torch.Tensor],
    }

    def __init__(
            self,
            sizes=((128, 256, 512),),  # one tuple of anchor sizes per feature map
            aspect_ratios=((0.5, 1.0, 2.0),),  # one tuple of h/w ratios per feature map
    ):
        super().__init__()
        # Accept flat 1-D inputs for convenience: promote ``sizes`` to a
        # tuple of singleton tuples and replicate ``aspect_ratios`` once per
        # feature map, so both always have the nested layout documented above.
        if not isinstance(sizes[0], (list, tuple)):
            sizes = tuple((s,) for s in sizes)
        if not isinstance(aspect_ratios[0], (list, tuple)):
            aspect_ratios = (aspect_ratios,) * len(sizes)

        self.sizes = sizes
        self.aspect_ratios = aspect_ratios
        # One [len(sizes[i]) * len(aspect_ratios[i]), 4] tensor of
        # zero-centered base anchors per feature map.
        self.cell_anchors = [
            self.generate_anchors(size, aspect_ratio)
            for size, aspect_ratio in zip(sizes, aspect_ratios)
        ]

    # TODO: https://github.com/pytorch/pytorch/issues/26792
    def generate_anchors(
            self,
            scales: List[int],
            aspect_ratios: List[float],
            dtype: torch.dtype = torch.float32,
            device: torch.device = torch.device("cpu"),
    ) -> Tensor:
        """
        For every (scale, aspect_ratio) combination, output a zero-centered
        anchor with those values, as (x1, y1, x2, y2) rounded to whole pixels.
        This method assumes aspect ratio = height / width for an anchor.

        :param scales: anchor sizes
        :param aspect_ratios: height/width ratios
        :param dtype: dtype of the returned tensor
        :param device: device of the returned tensor
        :return: [len(aspect_ratios) * len(scales), 4] tensor of base anchors
        """
        scales = torch.as_tensor(scales, dtype=dtype, device=device)  # [S]
        aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)  # [A]
        # With ratio = h / w, scaling h by sqrt(ratio) and w by 1/sqrt(ratio)
        # keeps the anchor area (scale^2) constant across ratios.
        h_ratios = torch.sqrt(aspect_ratios)  # [A]
        w_ratios = 1 / h_ratios  # [A]

        ws = (w_ratios[:, None] * scales[None, :]).view(-1)  # [A,1] * [1,S] -> [A*S]
        hs = (h_ratios[:, None] * scales[None, :]).view(-1)

        # (x1, y1, x2, y2) centered on (0, 0).
        base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2
        # Round so the anchor corners land on whole pixels.
        return base_anchors.round()

    def set_cell_anchors(self, dtype: torch.dtype, device: torch.device):
        """
        Move the cached zero-centered base anchors to the given dtype/device.

        :param dtype: target dtype
        :param device: target device
        """
        self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device) for cell_anchor in self.cell_anchors]

    def num_anchors_per_location(self) -> List[int]:
        """
        Return the number of anchors per spatial location, one entry per
        feature map.
        """
        return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]

    def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
        """
        Tile the base anchors over every cell of every feature-map grid.

        For every combination of (a, (g, s), i) in (self.cell_anchors,
        zip(grid_sizes, strides), 0:2), output g[i] anchors that are s[i]
        distance apart in direction i, with the same dimensions as a.

        :param grid_sizes: spatial size (H, W) of each of the F feature maps
        :param strides: (stride_h, stride_w) of each feature map w.r.t. the image
        :return: one [H * W * A, 4] tensor of (x1, y1, x2, y2) anchors per feature map
        """
        anchors = []
        cell_anchors = self.cell_anchors
        assert cell_anchors is not None, "cell_anchors should not be None"
        assert len(grid_sizes) == len(strides) == len(cell_anchors), \
            "Anchors should be Tuple[Tuple[int]] because each feature " \
            "map could potentially have different sizes and aspect ratios. " \
            "There needs to be a match between the number of " \
            "feature maps passed and the number of sizes / aspect ratios specified."

        for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
            grid_height, grid_width = size
            stride_height, stride_width = stride
            device = base_anchors.device

            # For each output anchor, compute [x_center, y_center, x_center,
            # y_center]: the top-left corner of every grid cell expressed in
            # image coordinates.
            shifts_x = torch.arange(0, grid_width, dtype=torch.int32, device=device) * stride_width
            shifts_y = torch.arange(0, grid_height, dtype=torch.int32, device=device) * stride_height
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)
            # (x, y, x, y) so one shift offsets both corners of a box.
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)

            # For every (base anchor, output anchor) pair, offset each
            # zero-centered base anchor by the center of the output anchor:
            # [H*W, 1, 4] + [1, A, 4] -> broadcast -> [H*W*A, 4].
            anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))

        return anchors

    def forward(self, image_list: "ImageList", feature_maps: List[Tensor]) -> List[Tensor]:
        """
        Generate the anchors for every image of a batch.

        All images in the batch share the same feature-map grids, so the
        anchors are computed once and replicated per image.

        :param image_list: batch of images; only ``image_list.tensors``
            (padded [B, C, H, W] batch) and ``image_list.image_sizes`` are read
        :param feature_maps: one [B, C_i, H_i, W_i] tensor per feature map
        :return: one [sum_i(H_i * W_i * A_i), 4] anchor tensor per image
        """
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]  # (H_i, W_i) per map
        image_size = image_list.tensors.shape[-2:]  # padded batch image size (H, W)
        dtype, device = feature_maps[0].dtype, feature_maps[0].device
        # Stride of each feature map relative to the input image, as 0-dim
        # int64 tensors (integer division: assumes the image size is a
        # multiple of the grid size).
        strides = [
            [
                torch.tensor(image_size[0] // g[0], dtype=torch.int64, device=device),  # stride H
                torch.tensor(image_size[1] // g[1], dtype=torch.int64, device=device),  # stride W
            ]
            for g in grid_sizes
        ]
        # Make the cached base anchors match the feature maps' dtype/device.
        self.set_cell_anchors(dtype, device)
        # Anchors for every pixel of every feature map.
        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)
        # Every image gets an identical, freshly concatenated anchor tensor.
        return [torch.cat(anchors_over_all_feature_maps) for _ in image_list.image_sizes]


if __name__ == "__main__":
    # Demo: generate anchors for a batch of 8 images over two feature maps
    # of different resolutions (32x32 and 64x64).
    anchor_generator = AnchorGenerator(
        sizes=((16, 32, 64), (32, 64, 128)),
        aspect_ratios=((0.5, 1.0, 2.0), (0.5, 1.0, 2.0)),
    )
    images = torch.randn((8, 3, 512, 512))
    image_list = ImageList(images, [(i * 64 + 64, i * 64 + 64) for i in range(8)])
    feature_maps = [torch.randn((8, 64, 32 + 32 * i, 32 + 32 * i)) for i in range(2)]
    anchors = anchor_generator(image_list, feature_maps)
    # One [sum_i(H_i * W_i * A_i), 4] tensor per image.
    print([tuple(a.shape) for a in anchors])
