import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from ...anchor import AnchorGenerator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
from .assigner_utils import print_bbox_in_img as _print_bbox_in_img
from .assigner_utils import print_num_anchor as _print_num_anchor
from typing import List
import cv2
import os
import pdb


@BBOX_ASSIGNERS.register_module()
class ATSSAssigner_replace_thre(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Variant of the standard ATSS assigner: the per-gt IoU threshold is NOT
    computed from the anchors actually passed to :meth:`assign`; instead it
    is recomputed from a fixed "origin" anchor layout (strides ``[4, 8, 16]``)
    by :meth:`get_origin_threshold` and substituted in.

    Each proposal will be assigned with `0` or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        topk (int): Number of anchors selected as candidates on each
            pyramid level.
        iou_calculator (dict): Config of the IoU calculator used for
            anchor/gt overlaps.
        ignore_iof_thr (float): IoF threshold above which anchors that
            overlap ``gt_bboxes_ignore`` are marked as ignored (``-1``).
            Disabled when negative.
        thre_method (str): How to build the per-gt IoU threshold from the
            candidate overlaps: ``"mean+var"``, ``"mean"`` or ``"mean-var"``.
        print_bbox_in_img (bool): If True, dump assignment visualisations
            into ``saved_path``.
        print_num_anchor (bool): If True, dump per-gt anchor statistics
            into ``saved_path``.
        saved_path (str): Output directory for the two debug dumps above.
        add_lvl1_anchor (bool): If True, gts that found no positive anchor
            outside the first pyramid level may pick up extra first-level
            positives using a fixed 0.1 IoU threshold.
    """

    # Large sentinel used to push ignored anchors out of every top-k
    # selection and to mark "no assignment" in the max-IoU reduction.
    # Reference implementation:
    # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
    INF = 100000000

    def __init__(
        self,
        topk,
        iou_calculator=dict(type="BboxOverlaps2D"),
        ignore_iof_thr=-1,
        thre_method="mean+var",
        print_bbox_in_img=False,
        print_num_anchor=False,
        saved_path="/workspace/volume/wangxianzhuo-data/data/facecar_visual/anchor/0",
        add_lvl1_anchor=False,
    ):
        self.topk = topk
        self.iou_calculator = build_iou_calculator(iou_calculator)
        self.ignore_iof_thr = ignore_iof_thr
        self.thre_method = thre_method
        self.print_bbox_in_img = print_bbox_in_img
        self.print_num_anchor = print_num_anchor
        self.saved_path = saved_path
        self.add_lvl1_anchor = add_lvl1_anchor

    @staticmethod
    def _center_distances(bboxes, gt_bboxes):
        """Euclidean distance between every bbox center and every gt center.

        Returns a ``(num_bboxes, num_gt)`` tensor.
        """
        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
        gt_points = torch.stack((gt_cx, gt_cy), dim=1)

        bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
        bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
        bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)

        # (n, 1, 2) - (1, k, 2) broadcast -> (n, k) distances.
        return (
            (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()
        )

    def _topk_candidates_per_level(self, distances, num_level_bboxes):
        """Per pyramid level, pick the topk anchors closest to each gt center.

        Args:
            distances (Tensor): Center distances, shape (num_bboxes, num_gt).
            num_level_bboxes (list[int]): Anchor count of each level; the
                levels are laid out consecutively along dim 0 of `distances`.

        Returns:
            tuple: ``(candidate_idxs, first_level_k)`` where `candidate_idxs`
            has shape ``(sum_l min(topk, n_l), num_gt)`` holding flat anchor
            indices, and `first_level_k` is the number of candidates taken
            from the first (highest-resolution) level.
        """
        candidate_idxs = []
        first_level_k = 0
        start_idx = 0
        for level, bboxes_per_level in enumerate(num_level_bboxes):
            end_idx = start_idx + bboxes_per_level
            distances_per_level = distances[start_idx:end_idx, :]
            # A level may hold fewer than `topk` anchors.
            selectable_k = min(self.topk, bboxes_per_level)
            if level == 0:
                first_level_k = selectable_k
            _, topk_idxs_per_level = distances_per_level.topk(
                selectable_k, dim=0, largest=False
            )
            candidate_idxs.append(topk_idxs_per_level + start_idx)
            start_idx = end_idx
        return torch.cat(candidate_idxs, dim=0), first_level_k

    def _threshold_from_overlaps(self, candidate_overlaps):
        """Turn per-gt candidate IoUs into a per-gt threshold.

        Raises:
            ValueError: If ``self.thre_method`` is not one of the supported
                strategies.
        """
        overlaps_mean_per_gt = candidate_overlaps.mean(0)
        overlaps_std_per_gt = candidate_overlaps.std(0)
        if self.thre_method == "mean+var":
            return overlaps_mean_per_gt + overlaps_std_per_gt
        if self.thre_method == "mean":
            return overlaps_mean_per_gt
        if self.thre_method == "mean-var":
            return overlaps_mean_per_gt - overlaps_std_per_gt
        raise ValueError(f"No such '{self.thre_method}' thre_method.")

    def get_origin_threshold(
        self,
        gt_bboxes,
        gt_bboxes_ignore=None,
        gt_labels=None,
        img_meta=None,
        featmap_sizes=None,
    ):
        """Compute the per-gt ATSS IoU threshold using the *origin* anchors.

        The model may run with a modified first-level stride (e.g.
        ``strides=[8, 8, 16]``); this method rebuilds the original layout
        (``strides=[4, 8, 16]``, i.e. a first feature map twice as large in
        each dimension) and runs the candidate-selection half of ATSS on it,
        returning only the resulting threshold.

        Args:
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ignored gt boxes.
            gt_labels (Tensor, optional): Unused; kept for signature parity
                with :meth:`assign`.
            img_meta (dict): Image meta info (needs ``img_shape`` and
                ``pad_shape``).
            featmap_sizes (list[tuple]): Current feature-map sizes; the
                first entry is doubled to recover the origin stride-4 level.

        Returns:
            Tensor: Per-gt IoU threshold, shape (k,).
        """
        # ---- generate the origin anchors --------------------------------
        anchor_generator = AnchorGenerator(
            ratios=[1.0],
            octave_base_scale=2,
            scales_per_octave=1,
            strides=[4, 8, 16],
            base_sizes=[4, 8, 16],
        )
        # The origin layout ran the first level at stride 4, so its feature
        # map is twice the current first level's size in each dimension.
        new_featmap_size = tuple(side * 2 for side in featmap_sizes[0])

        anchor_list, valid_flag_list = get_anchors(
            anchor_generator, [new_featmap_size, *featmap_sizes[1:]], [img_meta]
        )

        # Flatten the single image's multi-level anchors.
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        flat_anchors = torch.cat(anchor_list[0])
        flat_valid_flags = torch.cat(valid_flag_list[0])

        # Keep only valid anchors (allowed_border=-1 keeps the generator's
        # validity flags untouched).
        inside_flags = anchor_inside_flags(
            flat_anchors, flat_valid_flags, img_meta["img_shape"][:2], -1
        )
        bboxes = flat_anchors[inside_flags, :]
        # Per-level counts of the surviving anchors.
        num_level_bboxes = self.get_num_level_anchors_inside(
            num_level_anchors, inside_flags
        )
        # ---- origin anchors done ----------------------------------------

        # Drop any extra columns — only the 4 coordinates matter.
        bboxes = bboxes[:, :4]
        num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        # IoU between every origin anchor and every gt, shape (n, k).
        overlaps = self.iou_calculator(bboxes, gt_bboxes)

        if num_gt == 0 or num_bboxes == 0:
            # BUGFIX: the original fell through here and crashed on the
            # empty mean/std below; an empty threshold is the right answer.
            return overlaps.new_zeros((num_gt,))

        distances = self._center_distances(bboxes, gt_bboxes)

        if (
            self.ignore_iof_thr > 0
            and gt_bboxes_ignore is not None
            and gt_bboxes_ignore.numel() > 0
            and bboxes.numel() > 0
        ):
            # Push anchors overlapping ignore regions out of every top-k.
            # BUGFIX: `INF` was an unresolved name in the original method.
            ignore_overlaps = self.iou_calculator(bboxes, gt_bboxes_ignore, mode="iof")
            ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
            distances[ignore_idxs, :] = self.INF

        candidate_idxs, _ = self._topk_candidates_per_level(
            distances, num_level_bboxes
        )
        # IoU of each candidate with its own gt column.
        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
        return self._threshold_from_overlaps(candidate_overlaps)

    def assign(
        self,
        bboxes,
        num_level_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=None,
        gt_labels=None,
        img_meta=None,
        featmap_sizes=None,
    ):
        """Assign gt to bboxes.

        The assignment is done in following steps

        1. compute iou between all bbox (bbox of all pyramid levels) and gt
        2. compute center distance between all bbox and gt
        3. on each pyramid level, for each gt, select k bbox whose center
           are closest to the gt center, so we total select k*l bbox as
           candidates for each gt
        4. replace the usual mean+std IoU threshold with the one derived
           from the *origin* anchor layout (see :meth:`get_origin_threshold`)
        5. select these candidates whose iou are greater than or equal to
           the threshold as positive
        6. limit the positive sample's center in gt

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            num_level_bboxes (List): num of bboxes in each level
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
            img_meta (dict, optional): Image meta info, required by
                :meth:`get_origin_threshold` and the debug dumps.
            featmap_sizes (list[tuple], optional): Current feature-map
                sizes, forwarded to :meth:`get_origin_threshold`.

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        # BUGFIX: removed leftover `ipdb.set_trace()` debugging breakpoints
        # that made this method unusable outside an interactive session.

        # Per-gt threshold from the origin (stride-4) anchor layout; this is
        # what replaces the usual mean+std threshold below.
        origin_threshold = self.get_origin_threshold(
            gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, featmap_sizes
        )

        INF = self.INF
        # Drop any extra columns — only the 4 coordinates matter.
        bboxes = bboxes[:, :4]
        num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        # IoU between every anchor and every gt, shape (n, k).
        overlaps = self.iou_calculator(bboxes, gt_bboxes)

        # Default: every anchor is background (0).
        assigned_gt_inds = overlaps.new_full((num_bboxes,), 0, dtype=torch.long)

        if num_gt == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment.
            max_overlaps = overlaps.new_zeros((num_bboxes,))
            if num_gt == 0:
                # No truth, assign everything to background.
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes,), -1, dtype=torch.long)
            return AssignResult(
                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
            )

        # Centers are also needed for the "center inside gt" check below,
        # so compute them inline instead of through _center_distances.
        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
        gt_points = torch.stack((gt_cx, gt_cy), dim=1)

        bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
        bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
        bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)

        # (n, 1, 2) - (1, k, 2) broadcast -> (n, k) center distances.
        distances = (
            (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()
        )

        if (
            self.ignore_iof_thr > 0
            and gt_bboxes_ignore is not None
            and gt_bboxes_ignore.numel() > 0
            and bboxes.numel() > 0
        ):
            # Anchors overlapping an ignore region too much are excluded
            # from candidate selection and marked as ignored (-1).
            ignore_overlaps = self.iou_calculator(bboxes, gt_bboxes_ignore, mode="iof")
            ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
            distances[ignore_idxs, :] = INF
            assigned_gt_inds[ignore_idxs] = -1

        # Per level, pick the topk anchors closest to each gt center.
        # candidate_idxs: (sum_l min(topk, n_l), num_gt) flat anchor indices.
        candidate_idxs, first_level_k = self._topk_candidates_per_level(
            distances, num_level_bboxes
        )

        # IoU of each candidate with its own gt column; the threshold is
        # the origin-anchor one instead of this layout's mean+std.
        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
        overlaps_thr_per_gt = origin_threshold

        is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]

        # ---- optionally add extra positives on the first level ----------
        if self.add_lvl1_anchor:
            # Fixed low threshold (0.1 IoU) for first-level candidates.
            lvl1_thr = torch.full_like(overlaps_thr_per_gt, 0.1)
            lvl1_overlaps = candidate_overlaps.clone()
            # BUGFIX: the original sliced with `selectable_k`, the leftover
            # value from the LAST level of the candidate loop; the first
            # level's candidate count is what is meant here.
            lvl1_overlaps[first_level_k:] = 0

            # Only gts with no positive anchor on any OTHER level may pick
            # up extra first-level positives.
            has_pos_in_other_layer = is_pos[first_level_k:].sum(dim=0, dtype=bool)
            lvl1_overlaps = lvl1_overlaps * ~has_pos_in_other_layer

            lvl1_pos = lvl1_overlaps >= lvl1_thr[None, :]
            is_pos = is_pos | lvl1_pos

        # Limit the positive sample's center in gt.
        # Offset each gt's candidate indices into a flattened (k*n,) space
        # so one gather below can address (gt, anchor) pairs directly.
        for gt_idx in range(num_gt):
            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
        ep_bboxes_cx = (
            bboxes_cx.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1)
        )
        ep_bboxes_cy = (
            bboxes_cy.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1)
        )
        candidate_idxs = candidate_idxs.view(-1)

        # Signed distances from each candidate center to its gt's four
        # sides; all must be positive for the center to lie inside the gt.
        l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
        t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
        r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
        b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
        is_pos = is_pos & is_in_gts

        # If an anchor box is assigned to multiple gts, keep the one with
        # the highest IoU (done in the transposed flat (k*n,) space).
        overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1)
        index = candidate_idxs.view(-1)[is_pos.view(-1)]
        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
        overlaps_inf = overlaps_inf.view(num_gt, -1).t()

        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
        # 1-based gt index for positives; untouched anchors stay 0 / -1.
        assigned_gt_inds[max_overlaps != -INF] = (
            argmax_overlaps[max_overlaps != -INF] + 1
        )

        # Optional debug dumps.
        if self.print_bbox_in_img:
            os.makedirs(self.saved_path, exist_ok=True)
            _print_bbox_in_img(
                img_meta, gt_bboxes, bboxes, max_overlaps, self.saved_path
            )

        if self.print_num_anchor:
            os.makedirs(self.saved_path, exist_ok=True)
            _print_num_anchor(
                bboxes,
                gt_bboxes,
                num_bboxes,
                num_level_bboxes,
                index,
                img_meta,
                gt_labels,
                self.saved_path,
                overlaps_thr_per_gt,
            )

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), -1)
            pos_inds = torch.nonzero(assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None
        return AssignResult(
            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
        )

    def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
        """Count, per pyramid level, how many anchors survived filtering.

        Args:
            num_level_anchors (list[int]): Total anchor count of each level.
            inside_flags (Tensor): Bool mask over all levels' anchors.

        Returns:
            list[int]: Surviving anchor count per level.
        """
        split_inside_flags = torch.split(inside_flags, num_level_anchors)
        num_level_anchors_inside = [int(flags.sum()) for flags in split_inside_flags]
        return num_level_anchors_inside


def get_anchors(anchor_generator, featmap_sizes, img_metas, device="cuda"):
    """Get anchors according to feature map sizes.
        (copied from anchor head)

    Args:
        featmap_sizes (list[tuple]): Multi-level feature map sizes.
        img_metas (list[dict]): Image meta info.
        device (torch.device | str): Device for returned tensors

    Returns:
        tuple:
            anchor_list (list[Tensor]): Anchors of each image.
            valid_flag_list (list[Tensor]): Valid flags of each image.
    """
    # Anchors depend only on the feature-map sizes, which every image in
    # the batch shares, so compute them once and reuse the same tensors.
    shared_anchors = anchor_generator.grid_anchors(featmap_sizes, device)
    anchor_list = [shared_anchors] * len(img_metas)

    # Valid flags depend on each image's padded shape, so compute per image.
    valid_flag_list = [
        anchor_generator.valid_flags(featmap_sizes, meta["pad_shape"], device)
        for meta in img_metas
    ]

    return anchor_list, valid_flag_list


def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0):
    """Check whether the anchors are inside the border.

    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
        valid_flags (torch.Tensor): An existing valid flags of anchors.
        img_shape (tuple(int)): Shape of current image.
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0.

    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a \
            valid range.
    """
    # A negative border disables filtering entirely: keep the generator's
    # validity flags untouched.
    if allowed_border < 0:
        return valid_flags

    img_h, img_w = img_shape[:2]
    low = -allowed_border
    # Start from a copy of the existing flags and AND in each side check,
    # so the caller's tensor is never mutated.
    inside_flags = valid_flags.clone()
    inside_flags &= flat_anchors[:, 0] >= low
    inside_flags &= flat_anchors[:, 1] >= low
    inside_flags &= flat_anchors[:, 2] < img_w + allowed_border
    inside_flags &= flat_anchors[:, 3] < img_h + allowed_border
    return inside_flags
