import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from ...anchor import AnchorGenerator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
from ...utils.misc import unmap
from typing import List
import cv2
import os
import pdb


# Global counter of debug images dumped by
# ATSSAssigner_mapping._print_bbox_in_img; the process hard-exits once it
# exceeds 100 (see that method).
count = 0
"""
wxz:
atss_assigner_mapping will selete positive sample in fpn featmap whose stride=4,
and then map the positive anchor to the corresponding anchor in featmap whose
stride=8,

"""


@BBOX_ASSIGNERS.register_module()
class ATSSAssigner_mapping(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposals will be assigned with `0` or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    When ``featmap_sizes`` is passed to :meth:`assign`, positive samples are
    first selected on a virtual stride-4 level (twice the resolution of the
    first FPN level) and the selection is then mapped back onto the real
    stride-8 anchors of the first level.

    Args:
        topk (int): number of bboxes selected in each level.
        iou_calculator (dict): config used to build the IoU calculator.
        ignore_iof_thr (float): anchors whose IoF with any ignored gt box
            exceeds this threshold are marked as ignored (-1); disabled
            when negative.
        thre_method (str): how the per-gt IoU threshold is formed. One of
            "mean+var", "mean" or "mean-var".
        print_bbox_in_img (bool): debug switch; draw gt boxes and positive
            anchors on the raw image and save it under ``saved_path``.
        print_num_anchor (bool): debug switch; log per-gt positive anchor
            counts to a text file derived from ``saved_path``.
        saved_path (str): output directory for the debug dumps above.
    """

    def __init__(
        self,
        topk,
        iou_calculator=dict(type="BboxOverlaps2D"),
        ignore_iof_thr=-1,
        thre_method="mean+var",
        print_bbox_in_img=False,
        print_num_anchor=False,
        saved_path="/workspace/volume/wangxianzhuo-data/data/facecar_visual/anchor/0",
    ):
        self.topk = topk
        self.iou_calculator = build_iou_calculator(iou_calculator)
        self.ignore_iof_thr = ignore_iof_thr
        self.thre_method = thre_method
        self.print_bbox_in_img = print_bbox_in_img
        self.print_num_anchor = print_num_anchor
        self.saved_path = saved_path

        # Reference implementation:
        # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py

    def assign(
        self,
        old_bboxes,
        old_num_level_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=None,
        gt_labels=None,
        img_meta=None,
        featmap_sizes=None,
    ):
        """Assign gt to bboxes.

        The assignment is done in following steps

        1. compute iou between all bbox (bbox of all pyramid levels) and gt
        2. compute center distance between all bbox and gt
        3. on each pyramid level, for each gt, select k bbox whose center
           are closest to the gt center, so we total select k*l bbox as
           candidates for each gt
        4. get corresponding iou for the these candidates, and compute the
           mean and std, set mean + std as the iou threshold
        5. select these candidates whose iou are greater than or equal to
           the threshold as positive
        6. limit the positive sample's center in gt

        Args:
            old_bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            old_num_level_bboxes (List): num of bboxes in each level
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
            img_meta (dict, optional): image meta info; required when
                ``featmap_sizes`` is given (border filtering, debug dumps).
            featmap_sizes (list[tuple], optional): used to generate small
                anchors whose stride is n/2, where n is the smallest stride
                in the FPN outputs.

        Returns:
            :obj:`AssignResult`: The assign result.
        """

        # -------------add stride=4 anchors --------------------------------
        if not featmap_sizes:
            # Bug fix: this branch previously left ``bboxes`` and
            # ``num_level_bboxes`` unbound (it was a bare ``pass``), raising
            # NameError below. Assign directly on the provided anchors.
            bboxes = old_bboxes
            num_level_bboxes = old_num_level_bboxes[:]
        else:
            # generate stride=4 anchors
            with torch.no_grad():
                biggest_featmap = featmap_sizes[0]
                new_featmap_size = []
                for i in range(len(biggest_featmap)):
                    new_featmap_size.append(biggest_featmap[i] * 2)
                new_featmap_size = tuple(new_featmap_size)
                anchor_generator = AnchorGenerator(
                    ratios=[1.0],
                    octave_base_scale=2,
                    scales_per_octave=1,
                    strides=[4, 8],
                )

                small_anchor_list, small_valid_flag_list = get_anchors(
                    anchor_generator, [new_featmap_size, biggest_featmap], [img_meta]
                )

                # Keep the border filtering consistent with the result in
                # ``get_target_single``.
                inside_flags = [0, 0]
                new_anchor_list = [0, 0]
                for i in range(len(small_anchor_list[0])):
                    inside_flags[i] = anchor_inside_flags(
                        small_anchor_list[0][i],
                        small_valid_flag_list[0][i],
                        img_meta["img_shape"][:2],
                        -1,
                    )
                    new_anchor_list[i] = small_anchor_list[0][i][inside_flags[i], :]

                # Substitute the first-level anchors with the stride-4 ones;
                # the selection is mapped back to the stride-8 grid later.
                bboxes = torch.cat(
                    [new_anchor_list[0], old_bboxes[old_num_level_bboxes[0] :, :]]
                )
                num_level_bboxes = old_num_level_bboxes[:]
                num_level_bboxes[0] = len(new_anchor_list[0])

        INF = 100000000
        self.INF = INF
        # keep only the 4 coordinate columns
        bboxes = bboxes[:, :4]
        num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        # compute iou between all bbox and gt
        overlaps = self.iou_calculator(bboxes, gt_bboxes)

        # assign 0 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes,), 0, dtype=torch.long)

        if num_gt == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment.
            # NOTE(review): when the stride-4 branch ran, the returned
            # tensors are sized for the substituted anchors rather than
            # ``old_bboxes`` — confirm callers never reach this path with
            # ``featmap_sizes`` set.
            max_overlaps = overlaps.new_zeros((num_bboxes,))
            if num_gt == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes,), -1, dtype=torch.long)
            return AssignResult(
                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
            )

        # compute center distance between all bbox and gt
        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
        gt_points = torch.stack((gt_cx, gt_cy), dim=1)

        bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
        bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
        bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)

        # if gt_points.shape = [2,2], gt_points[None,:,:].shape = [1,2,2];
        # the None-indexing inserts a broadcast dimension
        distances = (
            (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()
        )

        if (
            self.ignore_iof_thr > 0
            and gt_bboxes_ignore is not None
            # torch.numel -> int: total number of elements in the tensor
            and gt_bboxes_ignore.numel() > 0
            and bboxes.numel() > 0
        ):
            ignore_overlaps = self.iou_calculator(bboxes, gt_bboxes_ignore, mode="iof")
            ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
            distances[ignore_idxs, :] = INF
            assigned_gt_inds[ignore_idxs] = -1

        # Selecting candidates based on the center distance
        candidate_idxs = []
        start_idx = 0
        for level, bboxes_per_level in enumerate(num_level_bboxes):
            # on each pyramid level, for each gt,
            # select k bbox whose center are closest to the gt center
            end_idx = start_idx + bboxes_per_level
            distances_per_level = distances[start_idx:end_idx, :]
            selectable_k = min(self.topk, bboxes_per_level)
            # https://pytorch.org/docs/stable/generated/torch.topk.html#torch.topk
            _, topk_idxs_per_level = distances_per_level.topk(
                selectable_k, dim=0, largest=False
            )
            candidate_idxs.append(topk_idxs_per_level + start_idx)
            start_idx = end_idx
        candidate_idxs = torch.cat(candidate_idxs, dim=0)
        # candidate_idxs.shape = [num_level * topk, num_gt]

        # get corresponding iou for the these candidates, and compute the
        # mean and std, set mean + std as the iou threshold
        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
        overlaps_mean_per_gt = candidate_overlaps.mean(0)
        overlaps_std_per_gt = candidate_overlaps.std(0)
        if self.thre_method == "mean+var":
            overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
        elif self.thre_method == "mean":
            overlaps_thr_per_gt = overlaps_mean_per_gt
        elif self.thre_method == "mean-var":
            overlaps_thr_per_gt = overlaps_mean_per_gt - overlaps_std_per_gt
        else:
            raise ValueError(f"No such '{self.thre_method}' thre_method.")

        is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]

        # limit the positive sample's center in gt.
        # Offset the candidate indices per gt so they address the flattened
        # [num_gt * num_bboxes] center arrays built just below.
        for gt_idx in range(num_gt):
            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
        # -----------------------------------------------------------------
        ep_bboxes_cx = (
            bboxes_cx.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1)
        )
        ep_bboxes_cy = (
            bboxes_cy.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1)
        )
        candidate_idxs = candidate_idxs.view(-1)

        # calculate the left, top, right, bottom distance between positive
        # bbox center and gt side
        l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
        t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
        r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
        b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
        is_pos = is_pos & is_in_gts

        # if an anchor box is assigned to multiple gts,
        # the one with the highest IoU will be selected.
        # torch.t() transposes the 2-D overlaps matrix
        overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1)
        index = candidate_idxs.view(-1)[is_pos.view(-1)]
        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
        overlaps_inf = overlaps_inf.view(num_gt, -1).t()

        # overlaps_inf.shape = [num_anchors, num_gt]
        # max_overlaps: best IoU of each anchor over all gts
        # argmax_overlaps: gt index achieving that best IoU
        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
        # ------------------ start mapping ------------------------
        if not featmap_sizes:
            # no stride-4 level was inserted, nothing to map back
            pass
        else:
            with torch.no_grad():
                # matching result of the small (stride-4) anchors after
                # border filtering
                h, w = 2 * featmap_sizes[0][0], 2 * featmap_sizes[0][1]
                after_filter_small_anchor_max_overlaps = max_overlaps[
                    0 : num_level_bboxes[0]
                ]
                after_filter_small_anchor_argmax_overlaps = argmax_overlaps[
                    0 : num_level_bboxes[0]
                ]

                # matching result of the small anchors before filtering
                before_filter_small_anchor_max_overlaps = unmap(
                    after_filter_small_anchor_max_overlaps,
                    inside_flags[0].size(0),
                    inside_flags[0],
                )
                before_filter_small_anchor_argmax_overlaps = unmap(
                    after_filter_small_anchor_argmax_overlaps,
                    inside_flags[0].size(0),
                    inside_flags[0],
                )

                # map the unfiltered small anchors onto the unfiltered big
                # (stride-8) anchors via 2x2 max pooling
                (
                    before_filter_big_max_overlaps,
                    before_filter_big_argmax_overlaps,
                ) = shrinking_mapping_usepool(
                    before_filter_small_anchor_max_overlaps,
                    before_filter_small_anchor_argmax_overlaps,
                    h,
                    w,
                )

                # map the unfiltered big anchors onto the filtered big anchors
                after_filter_big_max_overlaps = before_filter_big_max_overlaps[
                    inside_flags[1]
                ]
                after_filter_big_argmax_overlaps = before_filter_big_argmax_overlaps[
                    inside_flags[1]
                ]

                # update num_bboxes; it is used again below
                num_bboxes = num_bboxes - num_level_bboxes[0] + old_num_level_bboxes[0]

                # rebuild assigned_gt_inds at the size of the filtered big
                # anchors, then splice the mapped first level together with
                # the untouched higher levels
                assigned_gt_inds = overlaps.new_full((num_bboxes,), 0, dtype=torch.long)
                max_overlaps = torch.cat(
                    [after_filter_big_max_overlaps, max_overlaps[num_level_bboxes[0] :]]
                )
                argmax_overlaps = torch.cat(
                    [
                        after_filter_big_argmax_overlaps,
                        argmax_overlaps[num_level_bboxes[0] :],
                    ]
                )
        assigned_gt_inds[max_overlaps != -INF] = (
            argmax_overlaps[max_overlaps != -INF] + 1
        )

        # ------------------------------------------------------------wxz
        if self.print_bbox_in_img:
            os.makedirs(self.saved_path, exist_ok=True)
            self._print_bbox_in_img(
                img_meta, gt_bboxes, bboxes, max_overlaps, self.saved_path
            )

        if self.print_num_anchor:
            os.makedirs(self.saved_path, exist_ok=True)
            self._print_num_anchor(
                gt_bboxes,
                num_bboxes,
                num_level_bboxes,
                index,
                img_meta,
                gt_labels,
                self.saved_path,
            )

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), -1)
            pos_inds = torch.nonzero(assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None
        return AssignResult(
            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
        )

    def _print_bbox_in_img(self, img_meta, gt_bboxes, bboxes, max_overlaps, saved_path):
        """Draw gt boxes (red) and positive anchors (green) on the raw image.

        Images are written to ``saved_path`` named after a global counter;
        the whole process exits once 100 images have been dumped.

        Args:
            img_meta (dict): img_meta information; ``filename`` is read.
            gt_bboxes (Tensor): gt bboxes location.
            bboxes (Tensor): all generated anchors.
            max_overlaps (Tensor): per-anchor best IoU; entries != -INF mark
                anchors assigned as positive.
            saved_path (str): output directory.
        """

        img_raw = cv2.imread(img_meta["filename"], cv2.IMREAD_COLOR)

        global count
        count = count + 1
        if count > 100:
            # hard-stop the process once enough debug images are saved
            os._exit(0)

        for i in range(len(gt_bboxes)):
            b = gt_bboxes[i, :]
            cv2.rectangle(
                img_raw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (0, 0, 255), 2
            )
        bboxes_assign = bboxes[(max_overlaps != -self.INF), :]
        for i in range(len(bboxes_assign)):
            b = bboxes_assign[i, :]
            cv2.rectangle(
                img_raw, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (0, 255, 0), 2
            )
        cv2.imwrite(os.path.join(saved_path, str(count) + ".jpg"), img_raw)

    def _print_num_anchor(
        self,
        gt_bboxes,
        num_bboxes,
        num_level_bboxes,
        index,
        img_meta,
        gt_labels,
        saved_path,
    ):
        """Append per-gt positive-anchor statistics to a text file.

        Args:
            gt_bboxes (Tensor): gt bboxes location.
            num_bboxes (int): the amount of generated anchors.
            num_level_bboxes (List): the amount of generated anchors from
                each FPN output.
            index (Tensor): the positive anchor indexes, offset by
                ``gt_idx * num_bboxes``.
            img_meta (dict): img_meta information.
            gt_labels (Tensor): gt labels.
            saved_path (str): its last path component selects the output
                file ``data<component>.txt`` in the parent directory.
        """
        num_gt = gt_bboxes.size(0)
        num_bboxes_from_level = self._get_num_bboxes_from_level(
            num_gt, num_bboxes, num_level_bboxes, index
        )
        # switch between a full per-level report and first-level-only output
        msg_type = "only_look_level1"

        if msg_type == "full":
            written_msgs = ""
            written_msgs += f"{img_meta['filename']}\n"
            written_msgs += f"\tTotally {num_gt} GT Objects\n"
            for i in range(num_gt):
                written_msgs += f"\t\t{i}th GT cls: {int(gt_labels[i].cpu())}. "
                written_msgs += "num pos anchor in fpn level"
                written_msgs += f"{num_bboxes_from_level[i]}. "
                area = (gt_bboxes[i][2] - gt_bboxes[i][0]) * (
                    gt_bboxes[i][3] - gt_bboxes[i][1]
                )
                area = float(area.cpu())
                written_msgs += f"area: {area:.2f}\n"
        elif msg_type == "only_look_level1":
            written_msgs = ""
            written_msgs += f"{img_meta['filename']}\n"
            written_msgs += f"\tTotally {num_gt} GT Objects\n"
            for i in range(num_gt):
                written_msgs += f"\t\t{i}th GT cls: {int(gt_labels[i].cpu())}. "
                written_msgs += "num pos anchor in 1st level: "
                written_msgs += f"{num_bboxes_from_level[i][0]}. "
                area = (gt_bboxes[i][2] - gt_bboxes[i][0]) * (
                    gt_bboxes[i][3] - gt_bboxes[i][1]
                )
                area = float(area.cpu())
                written_msgs += f"area: {area:.2f}\n"

        saved_path = saved_path.split("/")
        filename_index = saved_path[-1]
        saved_path = "/".join(saved_path[:-1])
        with open(os.path.join(saved_path, f"data{filename_index}.txt"), "a") as f:
            f.write(written_msgs)

    def _get_num_bboxes_from_level(self, num_gt, num_bboxes, num_level_bboxes, index):
        """Count, for every gt, how many positive anchors came from each level.

        Args:
            num_gt (int): number of gt boxes.
            num_bboxes (int): total number of anchors.
            num_level_bboxes (List[int]): anchors per FPN level.
            index (Tensor): flat positive-candidate indices, offset by
                ``gt_idx * num_bboxes``.

        Returns:
            List[List[int]]: per-gt, per-level positive anchor counts.
        """
        num_bboxes_from_level = [[0 for _ in num_level_bboxes] for _ in range(num_gt)]
        for i in index:
            # decode the gt index and the within-image anchor index
            gt_index = i // num_bboxes
            i = i % num_bboxes
            for which_level, num_level_bbox in enumerate(num_level_bboxes):
                i -= num_level_bbox
                if i < 0:
                    num_bboxes_from_level[gt_index][which_level] += 1
                    break
        # return row copies so callers cannot mutate our working lists
        return [num_bboxes_from_level[i][:] for i in range(len(num_bboxes_from_level))]


def get_anchors(anchor_generator, featmap_sizes, img_metas, device="cuda"):
    """Get anchors according to feature map sizes.
        (copied from anchor head)

    Args:
        anchor_generator: object exposing ``grid_anchors`` and ``valid_flags``.
        featmap_sizes (list[tuple]): Multi-level feature map sizes.
        img_metas (list[dict]): Image meta info; ``pad_shape`` is read.
        device (torch.device | str): Device for returned tensors

    Returns:
        tuple:
            anchor_list (list[Tensor]): Anchors of each image.
            valid_flag_list (list[Tensor]): Valid flags of each image.
    """
    # All images share the same feature-map sizes, so the multi-level
    # anchors are computed once and shared across images.
    shared_anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    anchor_list = [shared_anchors] * len(img_metas)

    # Validity flags depend on each image's padded shape, so compute them
    # per image.
    valid_flag_list = [
        anchor_generator.valid_flags(featmap_sizes, meta["pad_shape"], device)
        for meta in img_metas
    ]

    return anchor_list, valid_flag_list


def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0):
    """Check whether the anchors are inside the border.

    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
        valid_flags (torch.Tensor): An existing valid flags of anchors.
        img_shape (tuple(int)): Shape of current image.
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0.

    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a \
            valid range.
    """
    # A negative border disables the geometric check entirely.
    if allowed_border < 0:
        return valid_flags

    img_h, img_w = img_shape[:2]
    x1, y1, x2, y2 = (flat_anchors[:, k] for k in range(4))
    inside = valid_flags & (x1 >= -allowed_border) & (y1 >= -allowed_border)
    inside = inside & (x2 < img_w + allowed_border) & (y2 < img_h + allowed_border)
    return inside


def shrinking_mapping(
    max_overlaps, argmax_overlaps, h, w
) -> "tuple[torch.Tensor, torch.Tensor]":
    """2x2-max-pool ``argmax_overlaps`` guided by ``max_overlaps``.

    In every 2x2 window, keep the entry of ``argmax_overlaps`` whose
    corresponding value in ``max_overlaps`` is the window maximum, and also
    output the pooled ``max_overlaps``.

    (Fix: a stray ``...`` statement used to precede this docstring, which
    prevented it from ever becoming the function's ``__doc__``.)

    Examples:
    --------------------
    max_overlaps:
    tensor([[0.5571, 0.8562, 0.1186, 0.5961],
            [0.7995, 0.6713, 0.5722, 0.8379],
            [0.2244, 0.2155, 0.3880, 0.8158],
            [0.6880, 0.7393, 0.3689, 0.6843]])

    argmax_overlaps:
    tensor([[ 3,  1, 17,  5],
            [13, 14,  1,  3],
            [19,  9, 17, 17],
            [13,  4,  1, 18]])

    output_argmax:
    tensor([[ 1,  3],
            [ 4, 17]])

    Args:
        max_overlaps (Tensor): flat tensor holding h*w best-IoU values.
        argmax_overlaps (Tensor): flat integer tensor holding h*w gt indices.
        h (int): grid height (expected even).
        w (int): grid width (expected even).

    Returns:
        tuple(Tensor, Tensor): pooled values and pooled integer indices,
        each flattened to length (h // 2) * (w // 2). The index elements
        keep their integer dtype.
    """
    max_overlaps = max_overlaps.reshape(h, w)
    argmax_overlaps = argmax_overlaps.reshape(h, w)
    output_max = max_overlaps.new_full((h // 2, w // 2), 0)
    # new_full keeps the source tensor's dtype, so indices stay int64 here.
    output_argmax = argmax_overlaps.new_full((h // 2, w // 2), 0)
    for i in range(h // 2):
        for j in range(w // 2):
            tmp_matrix = max_overlaps[2 * i : 2 * i + 2, 2 * j : 2 * j + 2]
            # row-wise maxima, then the best row -> 2-D argmax of the window
            value, indices = tmp_matrix.max(dim=1)
            _, new_indices = value.max(dim=0)
            indice_i = new_indices
            indice_j = indices[indice_i]
            output_argmax[i][j] = argmax_overlaps[2 * i + indice_i][2 * j + indice_j]
            output_max[i][j] = max_overlaps[2 * i + indice_i][2 * j + indice_j]
    return output_max.reshape(-1), output_argmax.reshape(-1)


@torch.no_grad()
def shrinking_mapping_usepool(max_overlaps, argmax_overlaps, h, w):
    """2x2-max-pool ``argmax_overlaps`` guided by ``max_overlaps``.

    In every 2x2 window, keep the entry of ``argmax_overlaps`` whose value in
    ``max_overlaps`` is the window maximum, and also output the pooled
    ``max_overlaps``. Implemented with PyTorch's built-in 2-D pooling, which
    is roughly 40x faster than the explicit-loop version above.

    Examples:
    --------------------
    max_overlaps:
    tensor([[0.5571, 0.8562, 0.1186, 0.5961],
            [0.7995, 0.6713, 0.5722, 0.8379],
            [0.2244, 0.2155, 0.3880, 0.8158],
            [0.6880, 0.7393, 0.3689, 0.6843]])

    argmax_overlaps:
    tensor([[ 3,  1, 17,  5],
            [13, 14,  1,  3],
            [19,  9, 17, 17],
            [13,  4,  1, 18]])

    output_argmax:
    tensor([[ 1,  3],
            [ 4, 17]])

    Returns:
        tuple(Tensor, Tensor): pooled values and pooled indices, both
        flattened; the index elements are integer-typed.
    """
    score_map = max_overlaps.reshape(h, w).unsqueeze(0).unsqueeze(0)
    label_map = argmax_overlaps.reshape(h, w).unsqueeze(0).unsqueeze(0)

    pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
    pooled_scores, winner_idx = pool(score_map)

    # Scatter ones back onto the winning positions, zeroing everything else.
    unpool = torch.nn.MaxUnpool2d(kernel_size=2, stride=2)
    winner_mask = unpool(torch.ones_like(pooled_scores), winner_idx)

    # After masking, each 2x2 window holds at most one non-zero label;
    # a second max-pool collects it.
    pooled_labels, _ = pool(label_map * winner_mask)

    pooled_scores = pooled_scores.squeeze(0).squeeze(0)
    pooled_labels = pooled_labels.squeeze(0).squeeze(0).int()

    return pooled_scores.reshape(-1), pooled_labels.reshape(-1)
