import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from datetime import datetime
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32

from mmdet.core import (
    anchor_inside_flags,
    build_anchor_generator,
    bbox2distance,
    bbox_overlaps,
    build_assigner,
    build_sampler,
    distance2bbox,
    images_to_levels,
    multi_apply,
    multiclass_nms,
    reduce_mean,
    unmap,
)
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead
from ...core.bbox.assigners import print_num_anchor, print_bbox_in_img
from .gfl_head_slim_annotation import GFLSinOut_slim_norm_bbox, DenseBlock
from ..utils.wxz_utils import ste_round_func
from .head_utils import compare_pre_cur_target, print_gt_pred, print_gt_pred_helper


@HEADS.register_module()
class GFLSinOut_S4downsample(GFLSinOut_slim_norm_bbox):
    """GFL-style head whose stride-4 level predicts on a 2x-downsampled map.

    Anchors for the bottom level are still generated on the original
    stride-4 grid (2x the prediction map's resolution in each dimension).
    During loss computation, each 2x2 group of stride-4 anchors is reduced
    to the single anchor with the highest IoU against its target, so that
    targets align with the downsampled prediction map. A separate
    ``inference_anchor_generator`` is used at test time.
    """

    def __init__(
        self,
        num_classes,
        in_channels,
        num_ins,
        stacked_convs=4,
        conv_cfg=None,
        norm_cfg=dict(type="GN", num_groups=32, requires_grad=True),
        loss_dfl=dict(type="DistributionFocalLoss", loss_weight=0.25),
        reg_max=16,
        integral_sparse=1,
        anchor_generator=dict(
            type="AnchorGenerator",
            scales=[8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64],
        ),
        inference_anchor_generator=dict(
            type="AnchorGenerator",
            scales=[8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64],
        ),
        **kwargs,
    ):
        """Build the head and its training/inference anchor generators.

        Args:
            num_classes (int): Number of foreground categories.
            in_channels (int): Channels of the input feature maps.
            num_ins (int): Number of input feature levels.
            stacked_convs (int): Number of stacked convs in each branch.
            conv_cfg (dict | None): Conv layer config.
            norm_cfg (dict): Norm layer config.
            loss_dfl (dict): Distribution focal loss config.
            reg_max (int): Max value of the integral distribution set.
            integral_sparse (int): Sparsity of the integral set.
            anchor_generator (dict): Anchor generator used for training.
            inference_anchor_generator (dict): Anchor generator used at
                test time (may differ from the training one).
        """
        super(GFLSinOut_S4downsample, self).__init__(
            num_classes,
            in_channels,
            num_ins,
            stacked_convs,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            loss_dfl=loss_dfl,
            reg_max=reg_max,
            integral_sparse=integral_sparse,
            **kwargs,
        )
        # Overwrite whatever the base class built: training and inference
        # use separately-configured anchor generators.
        self.anchor_generator = build_anchor_generator(anchor_generator)
        self.inference_anchor_generator = build_anchor_generator(
            inference_anchor_generator
        )

    def loss_single(
        self,
        anchors,
        cls_score,
        bbox_pred,
        labels,
        label_weights,
        bbox_targets,
        stride,
        num_total_samples,
        img_metas,
    ):
        """Compute cls and bbox losses for a single scale level.

        For the stride-4 level, targets are first remapped from the full
        2x-resolution anchor grid down to the prediction map by keeping,
        in each 2x2 anchor group, the anchor with the highest IoU.

        Args:
            anchors: Anchors of this level; for stride 4 this still holds
                the full 2x-resolution grid (batched, last dim 4).
            cls_score: Cls scores, shape (N, num_classes, H, W).
            bbox_pred: Decoded distance predictions (4 channels).
            labels: Assigned label per anchor.
            label_weights: Per-anchor label weights.
            bbox_targets: Per-anchor bbox targets (x1, y1, x2, y2).
            stride: (h_stride, w_stride) of this level; must be equal.
            num_total_samples: Normalizer for the cls loss.
            img_metas: Image meta info (only used by the disabled
                debug-visualization branch below).
        """
        assert stride[0] == stride[1], "h stride is not equal to w stride!"
        if stride[0] == 4:
            #  stride = [i * 2 for i in stride]
            H, W = cls_score.shape[2:4]
            # iou between anchors and targets
            ious = bbox_overlaps(anchors, bbox_targets, "iou", True)
            # View IoUs as a (num, 2H, 2W) map so a 2x2 max-pool picks the
            # best anchor inside each 2x2 group of the dense grid.
            ious = ious.reshape(-1, H * 2, W * 2)
            max_iou2x2, max_iou2x2_inds = F.max_pool2d(ious, 2, return_indices=True)
            # select one from each 2x2
            max_iou2x2_inds = max_iou2x2_inds.reshape(ious.shape[0], -1)
            # generate dim1(batch) inds
            # NOTE(review): torch.arange(...) with device= would avoid the
            # Python range + .cuda(device) round-trip — confirm and simplify.
            dim1_inds = (
                torch.tensor(range(ious.shape[0]))
                .reshape(-1, 1)
                .expand(max_iou2x2_inds.shape)
                .cuda(max_iou2x2.device)
            )
            # dim2(num total anchors) inds
            dim2_inds = max_iou2x2_inds.reshape(-1)
            dim1_inds = dim1_inds.reshape(-1)
            # (H*W, 2)
            inds = torch.stack((dim1_inds, dim2_inds), dim=1)

            # get final assign results: keep only the selected anchor (and
            # its target/label/weight) from each 2x2 group
            anchors = anchors[inds[:, 0], inds[:, 1], :]
            bbox_targets = bbox_targets[inds[:, 0], inds[:, 1], :]
            labels = labels[inds[:, 0], inds[:, 1]]
            label_weights = label_weights[inds[:, 0], inds[:, 1]]

        # Flatten batch and spatial dims for per-anchor loss computation.
        anchors = anchors.reshape(-1, 4)
        cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)

        bbox_targets = bbox_targets.reshape(-1, 4)
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)

        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1)
        score = label_weights.new_zeros(labels.shape)

        # paint gt bboxes and anchors in img
        # Debug visualization, permanently disabled via `if 0:`.
        if 0:
            import cv2

            saved_path = "tmp_save/s4downsample_zsb"
            os.makedirs(saved_path, exist_ok=True)

            INF = 100000000
            gt_bboxes = bbox_targets[pos_inds]
            bboxes = anchors[pos_inds]
            img_file = img_metas[0]["filename"].split("/")[-1]
            img_file = os.path.join(saved_path, img_file)
            if os.path.isfile(img_file):
                img_raw = cv2.imread(img_file)
            else:
                img_raw = cv2.imread(img_metas[0]["filename"], cv2.IMREAD_COLOR)

            for i in range(len(gt_bboxes)):
                b = gt_bboxes[i, :]
                cv2.rectangle(
                    img_raw,
                    (int(b[0]), int(b[1])),
                    (int(b[2]), int(b[3])),
                    (0, 0, 255),
                    2,
                )
            for i in range(len(bboxes)):
                b = bboxes[i, :]
                cv2.rectangle(
                    img_raw,
                    (int(b[0]), int(b[1])),
                    (int(b[2]), int(b[3])),
                    (0, 255, 0),
                    2,
                )
            cv2.imwrite(img_file, img_raw)

        def is_inside(anchor_centers, gt_bboxes):
            # Distances from each anchor center to the four sides of its gt
            # box; positive in all four means the center lies inside the box.
            l_ = anchor_centers[:, 0] - gt_bboxes[:, 0]
            t_ = anchor_centers[:, 1] - gt_bboxes[:, 1]
            r_ = gt_bboxes[:, 2] - anchor_centers[:, 0]
            b_ = gt_bboxes[:, 3] - anchor_centers[:, 1]
            # prevent there being no gt in imgs
            if l_.size()[0] == 0:
                is_in_gts = None
            else:
                is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
            return is_in_gts

        # center filter: drop positive anchors whose (quantized) center falls
        # outside the gt box.
        if 1:
            pos_anchors = anchors[pos_inds]
            pos_anchor_centers = self.anchor_center(pos_anchors) // stride[0]
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_decode_bbox_targets = pos_bbox_targets / stride[0]

            is_in_gts = is_inside(pos_anchor_centers, pos_decode_bbox_targets)
            # NOTE(review): `is None` would be the idiomatic check here.
            if is_in_gts == None:
                ...
            else:
                pos_inds = pos_inds[is_in_gts]

        if len(pos_inds) > 0:
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_bbox_pred = bbox_pred[pos_inds]
            pos_anchors = anchors[pos_inds]
            # Floor-divide so the centers of the four anchors in a 2x2 group
            # collapse into one; anchors whose centers fall outside the gt
            # bbox were already removed above.
            #  pos_anchor_centers = self.anchor_center(pos_anchors) // stride[0]

            # [stride4downsample] map the centers of each 2x2 anchor group on
            # the stride-4 featmap onto the unified top-left center, i.e. the
            # feature point
            if stride[0] == 4:
                pos_anchor_centers = self.anchor_center(pos_anchors) // 8
            else:
                pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]

            #  if 0:
            #      saved_path = "tmp_save/s4downsample_zsb"
            #      txt_file = os.path.join(saved_path, "anchor_center.txt")
            #      with open(txt_file, "a") as f:
            #          written_msgs = img_metas[0]["filename"]
            #          written_msgs += "\n"
            #          written_msgs += f"\tstride: {stride[0]}\n"
            #          written_msgs += (
            #              f"\t\tanchor_center: {pos_anchor_centers.cpu().tolist()}\n"
            #          )
            #          tmp_gt_bboxes = (pos_bbox_targets / stride[0]).cpu().tolist()
            #          written_msgs += f"\t\tgt_bboxes: {tmp_gt_bboxes}\n"
            #
            #          f.write(written_msgs)

            # Reweight regression by the max predicted class probability.
            weight_targets = cls_score.detach().sigmoid()
            weight_targets = weight_targets.max(dim=1)[0][pos_inds]
            # get real bbox location rather than distance from boundary.
            # Use positive anchor centers to decode bbox_pred into corner
            # coordinates of the predicted boxes.
            pos_decode_bbox_pred = distance2bbox(pos_anchor_centers, pos_bbox_pred)
            # Convert target coordinates to feature-map scale.
            # [stride4downsample] targets are still divided by 4 via /8 of the
            # doubled stride (true division, not floor, so the featmap-scale
            # loss differs from the image-scale loss only by an integer factor)
            # TODO: questionable — anchor centers are scaled by 1/8 (floored)
            # while gt boxes are scaled by 1/4-equivalent; geometrically these
            # are not exactly matched.
            if stride[0] == 4:
                pos_decode_bbox_targets = pos_bbox_targets / 8
            else:
                pos_decode_bbox_targets = pos_bbox_targets / stride[0]

            # NOTE(review): this result is unused — recomputed for debugging?
            is_in_gts = is_inside(pos_anchor_centers, pos_decode_bbox_targets)

            # Quality score for QFL: IoU of decoded prediction vs target.
            score[pos_inds] = bbox_overlaps(
                pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True
            )

            # regression loss
            loss_bbox = self.loss_bbox(
                pos_decode_bbox_pred,
                pos_decode_bbox_targets,
                weight=weight_targets,
                avg_factor=1.0,
            )

        else:
            # No positives: keep the graph connected with a zero loss.
            loss_bbox = bbox_pred.sum() * 0
            weight_targets = bbox_pred.new_tensor(0)

        # cls (qfl) loss
        loss_cls = self.loss_cls(
            cls_score,
            (labels, score),
            weight=label_weights,
            avg_factor=num_total_samples,
        )
        return loss_cls, loss_bbox, weight_targets.sum()

    @force_fp32(apply_to=("cls_scores", "bbox_preds"))
    def loss(
        self,
        cls_scores,
        bbox_preds,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=None,
    ):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Cls and quality scores for each scale
                level has shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of integral
                set.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """

        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        # origin stride4 anchors: the bottom-level prediction map is 2x
        # downsampled, so double its size to generate the full dense grid
        # of stride-4 anchors for target assignment.
        featmap_sizes[0] = torch.Size(
            [featmap_sizes[0][0] * 2, featmap_sizes[0][1] * 2]
        )
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device
        )
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1

        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            featmap_sizes=featmap_sizes,
        )
        if cls_reg_targets is None:
            return None

        (
            anchor_list,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            bbox_weights_list,
            num_total_pos,
            num_total_neg,
        ) = cls_reg_targets

        # Average positive count across GPUs for a stable cls normalizer.
        num_total_samples = reduce_mean(
            torch.tensor(num_total_pos, dtype=torch.float, device=device)
        ).item()
        num_total_samples = max(num_total_samples, 1.0)

        losses_cls, losses_bbox, avg_factor = multi_apply(
            self.loss_single,
            anchor_list,
            cls_scores,
            bbox_preds,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            self.anchor_generator.strides,
            num_total_samples=num_total_samples,
            img_metas=img_metas,
        )

        # Normalize bbox losses by the summed quality weights.
        avg_factor = sum(avg_factor)
        avg_factor = reduce_mean(avg_factor).clamp_(min=1).item()
        losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
        # breakpoint()
        # if losses_bbox[0].item() < 0:
        #    breakpoint()
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)

    def _get_bboxes(
        self,
        cls_scores,
        bbox_preds,
        mlvl_anchors,
        img_shapes,
        scale_factors,
        cfg,
        rescale=False,
        with_nms=True,
    ):
        """Transform outputs for a single batch item into labeled boxes.

        Args:
            cls_scores (list[Tensor]): Box scores for a single scale level
                has shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for a single
                scale level with shape (N, 4*(n+1), H, W), n is max value of
                integral set.
            mlvl_anchors (list[Tensor]): Box reference for a single scale level
                with shape (num_total_anchors, 4).
            img_shapes (list[tuple[int]]): Shape of the input image,
                list[(height, width, 3)].
            scale_factors (list[ndarray]): Scale factor of the image arange as
                (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where 5 represent
                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
                The shape of the second tensor in the tuple is (n,), and
                each element represents the class label of the corresponding
                box.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
        batch_size = cls_scores[0].shape[0]

        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, stride, anchors in zip(
            cls_scores, bbox_preds, self.anchor_generator.strides, mlvl_anchors
        ):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            assert stride[0] == stride[1]
            scores = (
                cls_score.permute(0, 2, 3, 1)
                .reshape(batch_size, -1, self.cls_out_channels)
                .sigmoid()
            )
            bbox_pred = bbox_pred.permute(0, 2, 3, 1)

            # Scale distances back to image space. The stride-4 level was
            # trained at 1/8 scale (see loss_single), hence the factor 8.
            if stride[0] == 4:
                bbox_pred = bbox_pred * 8
            else:
                bbox_pred = bbox_pred * stride[0]
            bbox_pred = bbox_pred.reshape(batch_size, -1, 4)

            nms_pre = cfg.get("nms_pre", -1)
            if nms_pre > 0 and scores.shape[1] > nms_pre:
                max_scores, _ = scores.max(-1)
                _, topk_inds = max_scores.topk(nms_pre)
                batch_inds = (
                    torch.arange(batch_size).view(-1, 1).expand_as(topk_inds).long()
                )
                anchors = anchors[topk_inds, :]
                bbox_pred = bbox_pred[batch_inds, topk_inds, :]
                scores = scores[batch_inds, topk_inds, :]
            else:
                anchors = anchors.expand_as(bbox_pred)

            bboxes = distance2bbox(
                self.anchor_center(anchors), bbox_pred, max_shape=img_shapes
            )
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)

        batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
        if rescale:
            batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(scale_factors).unsqueeze(
                1
            )

        batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
        # Add a dummy background class to the backend when using sigmoid
        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
        # BG cat_id: num_class
        padding = batch_mlvl_scores.new_zeros(batch_size, batch_mlvl_scores.shape[1], 1)
        batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)

        if with_nms:
            det_results = []
            for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores):
                det_bbox, det_label = multiclass_nms(
                    mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img
                )
                det_results.append(tuple([det_bbox, det_label]))
        else:
            det_results = [
                tuple(mlvl_bs) for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
            ]
        return det_results

    @force_fp32(apply_to=("cls_scores", "bbox_preds"))
    def get_bboxes(
        self, cls_scores, bbox_preds, img_metas, cfg=None, rescale=False, with_nms=True
    ):
        """Transform network output for a batch into bbox predictions.

        Args:
            cls_scores (list[Tensor]): Box scores for each level in the
                feature pyramid, has shape
                (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each
                level in the feature pyramid, has shape
                (N, num_anchors * 4, H, W).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where 5 represent
                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
                The shape of the second tensor in the tuple is (n,), and
                each element represents the class label of the corresponding
                box.

        Example:
            >>> import mmcv
            >>> self = AnchorHead(
            >>>     num_classes=9,
            >>>     in_channels=1,
            >>>     anchor_generator=dict(
            >>>         type='AnchorGenerator',
            >>>         scales=[8],
            >>>         ratios=[0.5, 1.0, 2.0],
            >>>         strides=[4,]))
            >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
            >>> cfg = mmcv.Config(dict(
            >>>     score_thr=0.00,
            >>>     nms=dict(type='nms', iou_thr=1.0),
            >>>     max_per_img=10))
            >>> feat = torch.rand(1, 1, 3, 3)
            >>> cls_score, bbox_pred = self.forward_single(feat)
            >>> # note the input lists are over different levels, not images
            >>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
            >>> result_list = self.get_bboxes(cls_scores, bbox_preds,
            >>>                               img_metas, cfg)
            >>> det_bboxes, det_labels = result_list[0]
            >>> assert len(result_list) == 1
            >>> assert det_bboxes.shape[1] == 5
            >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
        """
        assert len(cls_scores) == len(bbox_preds)
        num_levels = len(cls_scores)

        device = cls_scores[0].device
        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
        # Inference uses the dedicated inference_anchor_generator (no 2x
        # featmap doubling here, unlike in loss()).
        mlvl_anchors = self.inference_anchor_generator.grid_anchors(
            featmap_sizes, device=device
        )

        mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
        mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]

        if torch.onnx.is_in_onnx_export():
            assert (
                len(img_metas) == 1
            ), "Only support one input image while in exporting to ONNX"
            img_shapes = img_metas[0]["img_shape_for_onnx"]
        else:
            img_shapes = [
                img_metas[i]["img_shape"] for i in range(cls_scores[0].shape[0])
            ]
        scale_factors = [
            img_metas[i]["scale_factor"] for i in range(cls_scores[0].shape[0])
        ]

        if with_nms:
            # some heads don't support with_nms argument
            result_list = self._get_bboxes(
                mlvl_cls_scores,
                mlvl_bbox_preds,
                mlvl_anchors,
                img_shapes,
                scale_factors,
                cfg,
                rescale,
            )
        else:
            result_list = self._get_bboxes(
                mlvl_cls_scores,
                mlvl_bbox_preds,
                mlvl_anchors,
                img_shapes,
                scale_factors,
                cfg,
                rescale,
                with_nms,
            )
        return result_list


# Global image counter used by GFLSinOut_S4downsample_shift.loss_single in
# test mode to stop the process after the first hundred images.
count = 0


@HEADS.register_module()
class GFLSinOut_S4downsample_shift(GFLSinOut_slim_norm_bbox):
    def __init__(
        self,
        num_classes,
        in_channels,
        num_ins,
        stacked_convs=4,
        conv_cfg=None,
        norm_cfg=dict(type="GN", num_groups=32, requires_grad=True),
        loss_dfl=dict(type="DistributionFocalLoss", loss_weight=0.25),
        loss_shift=dict(type="MSELoss", loss_weight=0.25),
        reg_max=16,
        integral_sparse=1,
        test_model=False,
        s4_bbox_weight=1,
        anchor_generator=dict(
            type="AnchorGenerator",
            scales=[8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64],
        ),
        inference_anchor_generator=dict(
            type="AnchorGenerator",
            scales=[8, 16, 32],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64],
        ),
        **kwargs,
    ):
        """Build the shift-branch variant of the stride-4 downsampled head.

        Besides the base-class construction, this sets up the shift loss,
        test-mode flag, the stride-4 bbox loss weight, and separate
        training/inference anchor generators.
        """
        super().__init__(
            num_classes,
            in_channels,
            num_ins,
            stacked_convs,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            loss_dfl=loss_dfl,
            reg_max=reg_max,
            integral_sparse=integral_sparse,
            **kwargs,
        )
        self.loss_shift = build_loss(loss_shift)
        self.test_model = test_model
        self.s4_bbox_weight = s4_bbox_weight
        # Training and inference anchors come from separately-configured
        # generators.
        for attr_name, gen_cfg in (
            ("anchor_generator", anchor_generator),
            ("inference_anchor_generator", inference_anchor_generator),
        ):
            setattr(self, attr_name, build_anchor_generator(gen_cfg))
        # previous_gt_num: number of gts assigned by dense anchors.
        # current_gt_num: number of gts assigned by sparse anchors remapped
        # from dense anchors.
        self.previous_gt_num = 0
        self.current_gt_num = 0
        self.s4ds_anchor_num = 0

    def _init_layers(self):
        """Initialize head layers, adding the shift branch.

        The shift branch (only applied to the bottom FPN level) predicts
        two sigmoid-activated channels that select which dense anchor a
        stride-4 feature point represents.
        """
        super()._init_layers()
        # Feature tower for the shift prediction.
        self.shift_convs = DenseBlock(
            self.stacked_convs,
            self.in_channels,
            self.feat_channels,
            self.conv_cfg,
            self.norm_cfg,
        )
        # Two-channel head squashed to (0, 1) by a sigmoid.
        shift_head = [
            nn.Conv2d(self.feat_channels, 2, 3, padding=1),
            nn.Sigmoid(),
        ]
        self.gfl_shift = nn.Sequential(*shift_head)

    def init_weights(self):
        """Initialize head weights, including the shift branch."""
        super().init_weights()
        normal_init(self.gfl_shift, std=0.01)
        self.shift_convs.init_weights()

    def forward(self, inputs):
        """Forward features from each FPN level.

        Args:
            inputs (list[Tensor]): Feature maps, one per FPN level.

        Returns:
            tuple[list[Tensor]]: ``(cls_scores, bbox_preds, shift_scores)``,
            each a list with one tensor per level. Only the bottom level
            gets a real shift prediction; the other levels receive an
            all-zero 2-channel placeholder so the output structure is
            uniform across levels.
        """
        outs = []
        for idx, inp in enumerate(inputs):
            cls_feat = self.cls_convs[idx](inp)
            reg_feat = self.reg_convs[idx](inp)
            cls_score = self.gfl_cls[idx](cls_feat)
            bbox_pred = self.scales[idx](self.gfl_reg[idx](reg_feat)).float()
            if idx == 0:
                # The shift branch only runs on the bottom (stride-4) level.
                shift_feat = self.shift_convs(inp)
                shift_score = self.gfl_shift(shift_feat)
            else:
                # Build an all-zero pseudo shift score for the other levels.
                # (Direct shape unpacking + new_zeros replaces the original
                # torch.tensor(shape) round-trip with new_full(..., 0).)
                n, _, h, w = bbox_pred.shape
                shift_score = bbox_pred.new_zeros((n, 2, h, w))
            outs.append([cls_score, bbox_pred, shift_score])
        # Transpose per-level triples into three per-level lists.
        return tuple(map(list, zip(*outs)))

    def loss_single(
        self,
        anchors,
        cls_score,
        bbox_pred,
        shift_score,
        labels,
        label_weights,
        bbox_targets,
        stride,
        num_total_samples,
        img_metas,
    ):
        """loss_single.

        Args:
            anchors:
            cls_score:
            bbox_pred:
            shift_score:
            labels:
            label_weights:
            bbox_targets:
            stride:
            num_total_samples:
            img_metas:
        """
        assert stride[0] == stride[1], "h stride is not equal to w stride!"
        if stride[0] == 4:
            #  stride = [i * 2 for i in stride]
            H, W = cls_score.shape[2:4]
            # iou between anchors and targets
            ious = bbox_overlaps(anchors, bbox_targets, "iou", True)
            ious = ious.reshape(-1, H * 2, W * 2)
            max_iou2x2, max_iou2x2_inds = F.max_pool2d(ious, 2, return_indices=True)
            # select one from each 2x2
            max_iou2x2_inds = max_iou2x2_inds.reshape(ious.shape[0], -1)
            # generate dim1(batch) inds
            dim1_inds = (
                torch.tensor(range(ious.shape[0]))
                .reshape(-1, 1)
                .expand(max_iou2x2_inds.shape)
                .cuda(max_iou2x2.device)
            )
            # dim2(num total anchors) inds
            dim2_inds = max_iou2x2_inds.reshape(-1)
            dim1_inds = dim1_inds.reshape(-1)
            # (H*W, 2)
            inds = torch.stack((dim1_inds, dim2_inds), dim=1)

            pos_inds = ((labels >= 0) & (labels < 2)).nonzero().squeeze(1)
            if len(pos_inds) > 0:
                previous_target = bbox_targets[pos_inds[:, 0], pos_inds[:, 1]]

            # get final assign results
            anchors = anchors[inds[:, 0], inds[:, 1], :]
            bbox_targets = bbox_targets[inds[:, 0], inds[:, 1], :]
            labels = labels[inds[:, 0], inds[:, 1]]
            label_weights = label_weights[inds[:, 0], inds[:, 1]]

        anchors = anchors.reshape(-1, 4)
        cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
        shift_score = shift_score.permute(0, 2, 3, 1).reshape(-1, 2)

        bbox_targets = bbox_targets.reshape(-1, 4)
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)

        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1)
        score = label_weights.new_zeros(labels.shape)

        def is_inside(anchor_centers, gt_bboxes):
            l_ = anchor_centers[:, 0] - gt_bboxes[:, 0]
            t_ = anchor_centers[:, 1] - gt_bboxes[:, 1]
            r_ = gt_bboxes[:, 2] - anchor_centers[:, 0]
            b_ = gt_bboxes[:, 3] - anchor_centers[:, 1]
            # prevent there being no gt in imgs
            if l_.size()[0] == 0:
                is_in_gts = []
            else:
                is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01

            return is_in_gts

        # center filter ----
        if stride[0] == 4:
            pos_anchors = anchors[pos_inds]
            pos_shift = shift_score[pos_inds]

            pos_anchor_centers = self.anchor_center(pos_anchors) // 8
            # 0：代表左边和上边的点；0.5代表右边和下边的点。
            pos_anchor_centers += ste_round_func(pos_shift) * 0.5
            pos_bbox_targets = bbox_targets[pos_inds]
            pos_decode_bbox_targets = pos_bbox_targets / 8

            pos_anchor_centers_noshift = self.anchor_center(pos_anchors) // 8

            is_in_gts = is_inside(pos_anchor_centers, pos_decode_bbox_targets)
            is_in_gts_noshift = is_inside(
                pos_anchor_centers_noshift, pos_decode_bbox_targets
            )
            pos_bbox_inds = pos_inds[is_in_gts]
            pos_shift_inds = pos_inds

            if self.test_model:
                if is_in_gts != []:
                    self.s4ds_anchor_num += is_in_gts.sum().tolist()
                    print(f"the s4 pos anchor num: {self.s4ds_anchor_num}")
                # print gt and pred bbox with shift & without shift
                #  if is_in_gts != [] and not is_in_gts.equal(is_in_gts_noshift):
                #      today_date = datetime.today().date().__str__()
                #      saved_path = (
                #          f"tmp_save/with_shift_and_without_shift/{today_date}/shift_1.0"
                #      )
                #      os.makedirs(saved_path, exist_ok=True)
                #      print_gt_pred_helper(
                #          pos_bbox_targets,
                #          is_in_gts,
                #          is_in_gts_noshift,
                #          pos_anchor_centers * 8,
                #          pos_anchor_centers_noshift * 8,
                #          bbox_pred[pos_inds] * 8,
                #          img_metas,
                #          saved_path,
                #      )

                # examine the degradation of shift-loss ---------------
                if "previous_target" in dir():
                    self.previous_gt_num, self.current_gt_num = compare_pre_cur_target(
                        previous_target,
                        bbox_targets[pos_bbox_inds],
                        img_metas,
                        self.previous_gt_num,
                        self.current_gt_num,
                    )

                # only look the first hundred images.
                global count
                count = count + 1
                if count > 100:
                    os._exit(0)
        else:
            pos_shift_inds = []
            pos_bbox_inds = pos_inds
        # end---------

        if len(pos_shift_inds) > 0:
            # compute the shift_loss
            pos_shift_pred = shift_score[pos_shift_inds]
            pos_shift_target = (
                self.anchor_center(pos_anchors) / 8
                - self.anchor_center(pos_anchors) // 8
            ) * 2

            loss_shift = self.loss_shift(pos_shift_pred, pos_shift_target)
        else:
            loss_shift = torch.tensor(0.0).to(shift_score.device)

        if len(pos_bbox_inds) > 0:
            pos_bbox_targets = bbox_targets[pos_bbox_inds]
            pos_bbox_pred = bbox_pred[pos_bbox_inds]
            pos_anchors = anchors[pos_bbox_inds]
            pos_shift = shift_score[pos_bbox_inds]
            pos_labels = labels[pos_bbox_inds]
            # [stride4downsample] 2x2个anchor的中心映射到stride4上featmap统一成左上的中心，即feature point
            # 真实框不用整除8，普通除以8就行。
            # 把坐标变成了基于featmap的坐标
            if stride[0] == 4:
                pos_anchor_centers = self.anchor_center(pos_anchors) // 8
                pos_anchor_centers += ste_round_func(pos_shift) * 0.5
                pos_decode_bbox_targets = pos_bbox_targets / 8
            else:
                pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
                pos_decode_bbox_targets = pos_bbox_targets / stride[0]

            # increase the lossweight in stride4 level
            weight_targets = cls_score.detach().sigmoid()
            if stride[0] == 4:
                # only increase the licence object loss, the licence class is 1.
                weight_targets[pos_bbox_inds[pos_labels == 1]] *= self.s4_bbox_weight
            weight_targets = weight_targets.max(dim=1)[0][pos_bbox_inds]
            # 利用正anchor的中心，得到bbox_pred解码之后的bbox顶点坐标。
            pos_decode_bbox_pred = distance2bbox(pos_anchor_centers, pos_bbox_pred)

            is_in_gts = is_inside(pos_anchor_centers, pos_decode_bbox_targets)

            score[pos_bbox_inds] = bbox_overlaps(
                pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True
            )

            # regression loss
            loss_bbox = self.loss_bbox(
                pos_decode_bbox_pred,
                pos_decode_bbox_targets,
                weight=weight_targets,
                avg_factor=1.0,
            )

        else:
            loss_bbox = bbox_pred.sum() * 0
            weight_targets = bbox_pred.new_tensor(0)

        # cls (qfl) loss, use num_total_samples as average
        if stride[0] == 4:
            loss_cls = self.loss_cls(
                cls_score,
                (labels, score),
                weight=label_weights * self.s4_bbox_weight,
                avg_factor=num_total_samples,
            )
        else:
            loss_cls = self.loss_cls(
                cls_score,
                (labels, score),
                weight=label_weights,
                avg_factor=num_total_samples,
            )
        return loss_cls, loss_bbox, loss_shift, weight_targets.sum()
        #  return loss_cls, loss_bbox, weight_targets.sum()

    @force_fp32(apply_to=("cls_scores", "bbox_preds", "shift_score"))
    def loss(
        self,
        cls_scores,
        bbox_preds,
        shift_score,
        gt_bboxes,
        gt_labels,
        img_metas,
        gt_bboxes_ignore=None,
    ):
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Cls and quality scores for each scale
                level has shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for each scale
                level with shape (N, 4*(n+1), H, W), n is max value of integral
                set.
            shift_score (list[Tensor]): Anchor-center shift predictions for
                each scale level with shape (N, 2, H, W); only the stride-4
                level carries meaningful values (others are pseudo scores).
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): specify which bounding
                boxes can be ignored when computing the loss.

        Returns:
            dict[str, Tensor]: A dictionary of loss components with keys
                ``loss_cls``, ``loss_bbox`` and ``loss_shift``.
        """

        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        # The stride-4 head predicts on a 2x-downsampled feature map, but the
        # anchors/targets are generated on the original stride-4 grid, so the
        # first level's size must be doubled back before target assignment.
        featmap_sizes[0] = torch.Size(
            [featmap_sizes[0][0] * 2, featmap_sizes[0][1] * 2]
        )
        assert len(featmap_sizes) == self.anchor_generator.num_levels

        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, img_metas, device=device
        )
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1

        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            featmap_sizes=featmap_sizes,
        )
        if cls_reg_targets is None:
            return None

        (
            anchor_list,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            bbox_weights_list,
            num_total_pos,
            num_total_neg,
        ) = cls_reg_targets

        # Average over positives across all GPUs; clamp to avoid div-by-zero
        # when a batch has no positive samples.
        num_total_samples = reduce_mean(
            torch.tensor(num_total_pos, dtype=torch.float, device=device)
        ).item()
        num_total_samples = max(num_total_samples, 1.0)

        losses_cls, losses_bbox, losses_shift, avg_factor = multi_apply(
            self.loss_single,
            anchor_list,
            cls_scores,
            bbox_preds,
            shift_score,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            self.anchor_generator.strides,
            num_total_samples=num_total_samples,
            img_metas=img_metas,
        )

        # Normalize the bbox losses by the summed quality weights (GFL-style).
        avg_factor = sum(avg_factor)
        avg_factor = reduce_mean(avg_factor).clamp_(min=1).item()
        losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
        # NOTE(review): losses_shift is intentionally not divided by
        # avg_factor, unlike losses_bbox — confirm this is the desired scaling.
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox, loss_shift=losses_shift)

    def _get_bboxes(
        self,
        cls_scores,
        bbox_preds,
        shift_scores,
        mlvl_anchors,
        img_shapes,
        scale_factors,
        cfg,
        rescale=False,
        with_nms=True,
    ):
        """Transform outputs for a single batch item into labeled boxes.

        Args:
            cls_scores (list[Tensor]): Box scores for a single scale level
                has shape (N, num_classes, H, W).
            bbox_preds (list[Tensor]): Box distribution logits for a single
                scale level with shape (N, 4*(n+1), H, W), n is max value of
                integral set.
            shift_scores (list[Tensor]): Anchor-center shift predictions per
                scale level with shape (N, 2, H, W); only the stride-4 level
                actually applies them.
            mlvl_anchors (list[Tensor]): Box reference for a single scale level
                with shape (num_total_anchors, 4).
            img_shapes (list[tuple[int]]): Shape of the input image,
                list[(height, width, 3)].
            scale_factors (list[ndarray]): Scale factor of the image arange as
                (w_scale, h_scale, w_scale, h_scale).
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
                The first item is an (n, 5) tensor, where 5 represent
                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
                The shape of the second tensor in the tuple is (n,), and
                each element represents the class label of the corresponding
                box.
        """
        cfg = self.test_cfg if cfg is None else cfg
        assert (
            len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) == len(shift_scores)
        )
        batch_size = cls_scores[0].shape[0]

        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, shift_score, stride, anchors in zip(
            cls_scores,
            bbox_preds,
            shift_scores,
            self.anchor_generator.strides,
            mlvl_anchors,
        ):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            assert stride[0] == stride[1]
            # (N, C, H, W) -> (N, H*W*num_anchors, num_classes), sigmoid
            # because the head is trained with sigmoid-based QFL.
            scores = (
                cls_score.permute(0, 2, 3, 1)
                .reshape(batch_size, -1, self.cls_out_channels)
                .sigmoid()
            )
            bbox_pred = bbox_pred.permute(0, 2, 3, 1)
            shift_score = shift_score.permute(0, 2, 3, 1).reshape(batch_size, -1, 2)

            # Scale the predicted distances back to image pixels. The stride-4
            # level was trained with distances normalized by 8 (see the
            # training-side division by 8), hence the asymmetric factor here.
            if stride[0] == 4:
                bbox_pred = bbox_pred * 8
            else:
                bbox_pred = bbox_pred * stride[0]
            bbox_pred = bbox_pred.reshape(batch_size, -1, 4)

            # Keep only the top nms_pre candidates per image (by max class
            # score) before decoding, to bound NMS cost.
            nms_pre = cfg.get("nms_pre", -1)
            if nms_pre > 0 and scores.shape[1] > nms_pre:
                max_scores, _ = scores.max(-1)
                _, topk_inds = max_scores.topk(nms_pre)
                batch_inds = (
                    torch.arange(batch_size).view(-1, 1).expand_as(topk_inds).long()
                )
                # NOTE(review): after this indexing `anchors` gains a batch
                # dimension (batch_size, nms_pre, 4).
                anchors = anchors[topk_inds, :]
                bbox_pred = bbox_pred[batch_inds, topk_inds, :]
                scores = scores[batch_inds, topk_inds, :]
                shift_score = shift_score[batch_inds, topk_inds, :]
            else:
                anchors = anchors.expand_as(bbox_pred)

            # modify anchor center
            if stride[0] == 4:
                anchor_center = self.anchor_center(anchors)
                # here we should use true shift in origin image: {0,4}
                # NOTE(review): `anchors` may already carry a batch dimension
                # (both branches above produce batched anchors), so this
                # repeat looks correct only for batch_size == 1 — TODO confirm.
                anchor_center = anchor_center.repeat(batch_size, 1, 1)
                # ste_round_func binarizes the sigmoid shift to {0, 1};
                # multiplying by 4 yields the {0, 4}-pixel center offset.
                anchor_center += ste_round_func(shift_score) * 4
            else:
                anchor_center = self.anchor_center(anchors)

            bboxes = distance2bbox(anchor_center, bbox_pred, max_shape=img_shapes)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)

        batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
        if rescale:
            batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(scale_factors).unsqueeze(
                1
            )

        batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
        # Add a dummy background class to the backend when using sigmoid
        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
        # BG cat_id: num_class
        padding = batch_mlvl_scores.new_zeros(batch_size, batch_mlvl_scores.shape[1], 1)
        batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)

        if with_nms:
            det_results = []
            # NOTE(review): the loop variables shadow the earlier per-level
            # lists `mlvl_bboxes`/`mlvl_scores`; harmless here since the lists
            # are no longer used, but worth renaming.
            for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores):
                det_bbox, det_label = multiclass_nms(
                    mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img
                )
                det_results.append(tuple([det_bbox, det_label]))
        else:
            det_results = [
                tuple(mlvl_bs) for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
            ]
        return det_results

    @force_fp32(apply_to=("cls_scores", "bbox_preds", "shift_score"))
    def get_bboxes(
        self,
        cls_scores,
        bbox_preds,
        shift_scores,
        img_metas,
        cfg=None,
        rescale=False,
        with_nms=True,
    ):
        """Transform network output for a batch into bbox predictions.

        Generates per-level anchors from the inference anchor generator,
        detaches all predictions, collects per-image shapes/scale factors,
        and delegates decoding + (optional) NMS to :meth:`_get_bboxes`.

        Args:
            cls_scores (list[Tensor]): Box scores for each level in the
                feature pyramid, has shape (N, num_anchors * num_classes,
                H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each level
                in the feature pyramid, has shape (N, num_anchors * 4, H, W).
            shift_scores (list[Tensor]): Anchor-center shift predictions for
                each level, shape (N, num_anchors * 2, H, W); only the bottom
                (stride-4) level carries meaningful shifts.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config | None): Test / postprocessing configuration;
                if None, ``self.test_cfg`` is used downstream.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before returning boxes.
                Default: True.

        Returns:
            list[tuple[Tensor, Tensor]]: One 2-tuple per image: an (n, 5)
                tensor of (tl_x, tl_y, br_x, br_y, score) and an (n,)
                tensor of class labels.
        """
        assert len(cls_scores) == len(bbox_preds)
        num_levels = len(cls_scores)
        device = cls_scores[0].device

        # Anchors come from the inference generator (true featmap sizes,
        # unlike training where the stride-4 size is doubled).
        featmap_sizes = [score.shape[-2:] for score in cls_scores]
        mlvl_anchors = self.inference_anchor_generator.grid_anchors(
            featmap_sizes, device=device
        )

        # Inference-only path: cut the graph on every prediction tensor.
        mlvl_cls_scores = [score.detach() for score in cls_scores]
        mlvl_bbox_preds = [pred.detach() for pred in bbox_preds]
        mlvl_shift_scores = [shift.detach() for shift in shift_scores]

        batch_size = cls_scores[0].shape[0]
        if torch.onnx.is_in_onnx_export():
            assert (
                len(img_metas) == 1
            ), "Only support one input image while in exporting to ONNX"
            # ONNX export carries a dynamic shape tensor instead of metas.
            img_shapes = img_metas[0]["img_shape_for_onnx"]
        else:
            img_shapes = [img_metas[i]["img_shape"] for i in range(batch_size)]
        scale_factors = [img_metas[i]["scale_factor"] for i in range(batch_size)]

        # _get_bboxes supports with_nms directly, so a single call suffices.
        return self._get_bboxes(
            mlvl_cls_scores,
            mlvl_bbox_preds,
            mlvl_shift_scores,
            mlvl_anchors,
            img_shapes,
            scale_factors,
            cfg,
            rescale,
            with_nms,
        )


@HEADS.register_module()
class GFLSinOut_S4downsample_shift_coupling(GFLSinOut_S4downsample_shift):
    """Variant that couples bbox regression and anchor-center shift.

    The stride-4 regression conv is widened to 6 output channels so the
    shift branch shares the same conv as bbox regression: channels 0:4 are
    the bbox distances, the last 2 channels are the shift scores.
    """

    def __init__(self, **kwargs):
        super(GFLSinOut_S4downsample_shift_coupling, self).__init__(**kwargs)

    def _init_layers(self):
        """Initialize layers of the head."""
        # add shift branch to select which dense anchor automatically
        super()._init_layers()
        # 6 channels = 4 bbox-distance channels + 2 shift-score channels.
        self.gfl_reg[0] = nn.Sequential(nn.Conv2d(self.feat_channels, 6, 3, padding=1))

    def forward(self, inputs):
        """Forward per-level features.

        Args:
            inputs (list[Tensor]): Feature maps, one per FPN level.

        Returns:
            tuple[list[Tensor], list[Tensor], list[Tensor]]: Per-level
            cls_scores, bbox_preds and shift_scores.
        """
        outs = []
        for idx, inp in enumerate(inputs):
            cls_feat = self.cls_convs[idx](inp)
            reg_feat = self.reg_convs[idx](inp)
            cls_score = self.gfl_cls[idx](cls_feat)
            # Run the regression conv ONCE and slice it (the original code
            # ran it twice for idx == 0). The bbox (0:4) and shift (-2:)
            # channel slices are disjoint, so gradients are unchanged.
            # the first four number in C channel in gfl_reg is bbox regression
            # the last two number in C channel is shift score
            reg_out = self.gfl_reg[idx](reg_feat)
            # use relu as bbox_pred's last output layer
            bbox_pred = self.scales[idx](torch.relu(reg_out[:, 0:4, ...])).float()
            if idx == 0:
                # use sigmoid as shift-score's last output layer
                shift_score = torch.sigmoid(reg_out[:, -2:, ...])
            else:
                # Other levels carry a pseudo (all-zero) shift score so the
                # per-level output structure stays uniform.
                n, _, h, w = bbox_pred.shape
                shift_score = bbox_pred.new_zeros((n, 2, h, w))
            outs.append([cls_score, bbox_pred, shift_score])
        # Transpose level-major [[cls, bbox, shift], ...] into
        # (cls_scores, bbox_preds, shift_scores) lists.
        return tuple(map(list, zip(*outs)))
