import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import ops

from torchvision.ops import boxes as box_ops

from detection.layers import smooth_l1_loss, cat
from detection.modeling.utils import Sampler, Matcher, BoxCoder, batched_nms
from .anchor_generator import AnchorGenerator


class RPN(nn.Module):
    """Region Proposal Network head (Faster R-CNN style).

    Runs a shared 3x3 conv + ReLU over each input feature map, then two 1x1
    heads: one producing per-anchor objectness logits and one producing
    per-anchor 4-d box-regression deltas. Deltas are decoded against anchors
    from :class:`AnchorGenerator`, filtered by per-level top-k and NMS into
    proposals, and (in training) matched/sampled against ground truth to
    compute objectness and localization losses.
    """

    def __init__(self, cfg, in_channels=256):
        """Build RPN layers from the config.

        Args:
            cfg: project config node. Reads ``MODEL.RPN.*`` here, plus
                ``MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE`` later on the
                ``is_da`` proposal path.
            in_channels (int): channels of each incoming feature map; also
                used as the head width when ``MODEL.RPN.NUM_CHANNELS`` is None.
        """
        super().__init__()
        self.cfg = cfg
        # fmt:off
        batch_size          = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE
        anchor_stride       = cfg.MODEL.RPN.ANCHOR_STRIDE
        anchor_scales       = cfg.MODEL.RPN.ANCHOR_SIZES
        anchor_ratios       = cfg.MODEL.RPN.ASPECT_RATIOS
        num_channels        = cfg.MODEL.RPN.NUM_CHANNELS
        nms_thresh          = cfg.MODEL.RPN.NMS_THRESH
        min_size            = cfg.MODEL.RPN.MIN_SIZE
        # Anchors per spatial location. Uses only level 0's counts, so it
        # assumes every level has the same number of scales/ratios -- TODO
        # confirm against the config schema.
        num_anchors         = len(anchor_ratios[0]) * len(anchor_scales[0])
        # fmt:on

        # Keyed by `self.training` (a bool) so the forward path can select
        # the train/test candidate budgets with a plain dict lookup.
        self.pre_nms_top_n = {
            True: cfg.MODEL.RPN.PRE_NMS_TOP_N_TRAIN,
            False: cfg.MODEL.RPN.PRE_NMS_TOP_N_TEST,
        }
        self.post_nms_top_n = {
            True: cfg.MODEL.RPN.POST_NMS_TOP_N_TRAIN,
            False: cfg.MODEL.RPN.POST_NMS_TOP_N_TEST,
        }
        self.nms_thresh = nms_thresh
        self.min_size = min_size
        self.batch_size = batch_size
        # IoU thresholds handed to Matcher together with labels [0, -1, 1]:
        # presumably IoU < 0.3 -> negative (0), 0.3..0.7 -> ignore (-1),
        # > 0.7 -> positive (1). Matcher is project code -- verify there.
        self.sample_threshold = [0.3, 0.7]

        num_channels = in_channels if num_channels is None else num_channels
        # Shared 3x3 "hidden" conv, then 1x1 heads: A objectness logits and
        # A*4 box deltas per spatial position.
        self.conv = nn.Conv2d(in_channels, num_channels, kernel_size=3, stride=1, padding=1)
        self.cls_logits = nn.Conv2d(num_channels, num_anchors, kernel_size=1, stride=1)
        self.bbox_pred = nn.Conv2d(num_channels, num_anchors * 4, kernel_size=1, stride=1)
        self.anchor_generator = AnchorGenerator(anchor_stride, anchor_scales, anchor_ratios)
        # Unit weights: raw (dx, dy, dw, dh) deltas, no re-scaling.
        self.box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
        self.matcher = Matcher(self.sample_threshold, labels=[0, -1, 1], allow_low_quality_matches=True)
        # 0.5 = target fraction of positives in each sampled minibatch
        # (presumed Sampler semantics -- confirm in detection.modeling.utils).
        self.sampler = Sampler(batch_size, 0.5)

        # Standard RPN init: N(0, 0.01) weights, zero bias, for every conv.
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                nn.init.normal_(layer.weight, std=0.01)
                nn.init.constant_(layer.bias, 0)

    def forward(self, images, features, img_metas, targets=None, is_da=False):
        """Predict objectness/deltas per level and produce proposals.

        Args:
            images: unused in this method body (kept for interface
                compatibility with the caller).
            features (list[Tensor]): per-level feature maps, each (N, C, Hi, Wi).
            img_metas (list[dict]): per-image metadata; only ``'img_shape'``
                is read downstream.
            targets (list[dict] | None): per-image ground truth; ``'boxes'``
                is read. Required for the training loss branch.
            is_da (bool): domain-adaptation flag; caps the post-NMS proposal
                count at the ROI-head batch size instead of the RPN setting.

        Returns:
            tuple: ``(proposals, loss, logits)`` where ``proposals`` is a list
            of per-image (num_kept, 5) tensors (4 box coords + sigmoid score),
            ``loss`` is a dict (empty outside training), and ``logits`` are
            the raw per-level objectness maps.
        """
        anchors = self.anchor_generator(features)
        t = [F.relu(self.conv(feature)) for feature in features]
        logits = [self.cls_logits(c) for c in t]
        bbox_reg = [self.bbox_pred(b) for b in t]

        # (N, A, Hi, Wi) -> (N, Hi*Wi*A): channel-last then flatten, so the
        # per-anchor ordering matches the decoded proposals below.
        pred_objectness_logits = [
            score.permute(0, 2, 3, 1).flatten(1)
            for score in logits
        ]
        # (N, A*4, Hi, Wi) -> (N, Hi*Wi*A, 4), same Hi*Wi*A ordering.
        pred_anchor_deltas = [
            x.view(x.shape[0], -1, 4, x.shape[-2], x.shape[-1])
            .permute(0, 3, 4, 1, 2)
            .flatten(1, -2)
            for x in bbox_reg
        ]

        if self.training and targets is not None:
            rpn_cls_loss, rpn_reg_loss = self.losses(anchors, pred_objectness_logits, pred_anchor_deltas, img_metas, targets)
            loss = {
                'rpn_cls_loss': rpn_cls_loss,
                'rpn_reg_loss': rpn_reg_loss,
            }
        else:
            loss = {}

        # Proposals feed the second stage as fixed boxes; no gradient flows
        # through proposal coordinates.
        with torch.no_grad():
            proposals = self.generate_proposals(anchors, pred_objectness_logits, pred_anchor_deltas, img_metas, is_da=is_da)

        return proposals, loss, logits

    def generate_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, img_metas, is_da=False):
        """Decode deltas into boxes and select the final per-image proposals.

        Pipeline: decode -> per-level/per-image top-k -> concat levels ->
        per-image clipping, small-box removal, level-batched NMS, and final
        post-NMS truncation.

        Returns:
            list[Tensor]: one (num_kept, 5) tensor per image, columns
            ``[x1, y1, x2, y2, sigmoid(objectness)]``.
        """
        # decode -->  pred_proposals
        proposals = self._decode_proposals(anchors, pred_anchor_deltas)
        #
        pre_nms_top_k = self.pre_nms_top_n[self.training]
        post_nms_top_k = self.post_nms_top_n[self.training]
        if is_da:
            # Domain-adaptation path: cap proposals at the ROI-head batch size.
            post_nms_top_k = self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
        nms_thresh = self.nms_thresh
        image_sizes = [x['img_shape'] for x in img_metas]
        min_box_size = self.min_size
        num_images = len(img_metas)
        device = proposals[0].device

        # 1. Select top-k anchor for every level and every image
        topk_scores = []  # #lvl Tensor, each of shape N x topk
        topk_proposals = []
        level_ids = []  # #lvl Tensor, each of shape (topk,)
        batch_idx = torch.arange(num_images, device=device)
        for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)):
            Hi_Wi_A = logits_i.shape[1]
            # A level may have fewer anchors than the pre-NMS budget.
            num_proposals_i = min(Hi_Wi_A, pre_nms_top_k)
            # Full sort + narrow (rather than topk); keeps scores and indices
            # aligned for the gather below.
            logits_i, idx = logits_i.sort(descending=True, dim=1)
            topk_scores_i = logits_i.narrow(1, 0, num_proposals_i)
            topk_idx = idx.narrow(1, 0, num_proposals_i)
            # each is N x topk
            topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 4

            topk_proposals.append(topk_proposals_i)
            topk_scores.append(topk_scores_i)
            # Level tag per kept slot; used below so NMS never suppresses
            # across pyramid levels.
            level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))

        # 2. Concat all levels together
        topk_scores = cat(topk_scores, dim=1)
        topk_proposals = cat(topk_proposals, dim=1)
        level_ids = cat(level_ids, dim=0)

        # 3. For each image, run a per-level NMS, and choose topk results.
        results = []
        for n, image_size in enumerate(image_sizes):
            boxes = topk_proposals[n]
            scores_per_img = topk_scores[n]
            lvl = level_ids

            # Inf/NaN boxes indicate divergence in training; at inference
            # they are silently dropped instead.
            valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores_per_img)
            if not valid_mask.all():
                if self.training:
                    raise FloatingPointError(
                        "Predicted boxes or scores contain Inf/NaN. Training has diverged."
                    )
                boxes = boxes[valid_mask]
                scores_per_img = scores_per_img[valid_mask]
                lvl = lvl[valid_mask]
            # NOTE(review): clip_boxes_to_image expects size as (height,
            # width) -- confirm 'img_shape' in img_metas uses that order.
            boxes = box_ops.clip_boxes_to_image(boxes, image_size)
            keep = box_ops.remove_small_boxes(boxes, min_box_size)
            if len(keep) != len(boxes):
                boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]
            # NMS
            keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh)
            # NMS output is score-sorted, so slicing keeps the best boxes.
            keep = keep[:post_nms_top_k]
            proposal_boxes = boxes[keep]
            objectness_logits = scores_per_img[keep]
            # In-place sigmoid_ is safe here: scores_per_img[keep] (advanced
            # indexing) returned a fresh tensor.
            results_i = torch.cat([proposal_boxes, objectness_logits.sigmoid_().view(-1, 1)], dim=1)
            assert results_i.shape[1] == 5,  "box_dim(4) + score_dim(1) = 5"
            results.append(results_i)

        return results

    def _decode_proposals(self, anchors, pred_anchor_deltas):
        """
        Transform anchors into proposals by applying the predicted anchor deltas.

        Args:
            anchors (list[Tensor]): per-level anchors, each (Hi*Wi*A, B).
            pred_anchor_deltas (list[Tensor]): per-level deltas, each
                (N, Hi*Wi*A, B), in the same anchor ordering.

        Returns:
            proposals (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A, B)
        """
        N = pred_anchor_deltas[0].shape[0]
        proposals = []
        # For each feature map
        for anchors_i, pred_anchor_deltas_i in zip(anchors, pred_anchor_deltas):
            B = anchors_i.size(1)
            pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B)
            # Expand anchors to shape (N*Hi*Wi*A, B)
            anchors_i = anchors_i.unsqueeze(0).expand(N, -1, -1).reshape(-1, B)
            proposals_i = self.box_coder.decode(pred_anchor_deltas_i, anchors_i)
            # Append feature map proposals with shape (N, Hi*Wi*A, B)
            proposals.append(proposals_i.view(N, -1, B))
        return proposals

    def losses(self, mlvl_anchors, mlvl_objectness, mlvl_box_regression, img_metas, targets):
        """Compute the RPN training losses.

        Classification: binary cross-entropy with logits over sampled anchors
        (label -1 = ignored, excluded via ``valid_mask``). Localization:
        smooth-L1 on box deltas, over positive anchors only (``pos_mask``).
        Both are sums normalized by ``batch_size * num_images`` -- the total
        anchor sample budget across the batch.

        Returns:
            tuple: ``(loss_rpn_cls, loss_rpn_loc)`` scalar tensors.
        """
        gt_labels, matched_gt_boxes = self.label_and_sample_anchors(mlvl_anchors, img_metas, targets)
        num_images = len(gt_labels)
        gt_labels = torch.stack(gt_labels)  # (N, R): 1 pos, 0 neg, -1 ignore

        pos_mask = gt_labels == 1

        # box loss
        localization_loss = self._dense_box_regression_loss(mlvl_anchors, mlvl_box_regression, matched_gt_boxes, pos_mask)
        # cls loss
        valid_mask = gt_labels >= 0
        objectness_loss = F.binary_cross_entropy_with_logits(
            cat(mlvl_objectness, dim=1)[valid_mask],
            gt_labels[valid_mask].to(torch.float32),
            reduction="sum",
        )
        normalizer = self.batch_size * num_images
        loss_rpn_cls = objectness_loss / normalizer
        loss_rpn_loc = localization_loss / normalizer

        return loss_rpn_cls, loss_rpn_loc

    def _dense_box_regression_loss(self, anchors, pred_anchor_deltas, gt_boxes, fg_mask, loss_type="smooth_l1", smooth_l1_beta=0.0):
        """Smooth-L1 regression loss between predicted and GT anchor deltas.

        Args:
            anchors (list[Tensor]): per-level anchors; concatenated to (R, 4).
            pred_anchor_deltas (list[Tensor]): per-level (N, Hi*Wi*A, 4) deltas.
            gt_boxes (list[Tensor]): per-image matched GT boxes, each (R, 4).
            fg_mask (Tensor): (N, R) bool mask selecting positive anchors.
            loss_type (str): only "smooth_l1" is implemented; "giou" raises.
            smooth_l1_beta (float): NOTE(review) -- this parameter is accepted
                but never used; the call below hardcodes ``beta=1.0 / 9``.

        Returns:
            Tensor: summed (unnormalized) regression loss.
        """
        anchors = torch.cat(anchors, 0)  # (R, 4)
        if loss_type == "smooth_l1":
            # Encode each image's matched GT boxes against the same anchors.
            gt_anchor_deltas = [self.box_coder.encode(anchors, k) for k in gt_boxes]
            gt_anchor_deltas = torch.stack(gt_anchor_deltas)    # (N, R, 4)
            loss_box_reg = smooth_l1_loss(
                cat(pred_anchor_deltas, dim=1)[fg_mask],
                gt_anchor_deltas[fg_mask],
                beta=1.0 / 9,
                reduction="sum",
            )
        elif loss_type == "giou":
            raise ValueError(f"current unsupport this loss type '{loss_type}'")
        else:
            raise ValueError(f"Invalid dense box regression loss type '{loss_type}'")

        return loss_box_reg

    def label_and_sample_anchors(self, mlvl_anchors, img_metas, targets):
        """Match anchors to GT boxes and sample a training minibatch per image.

        Returns:
            tuple: ``(gt_labels, matched_gt_boxes)`` -- per-image label
            vectors (1/0/-1 over all R anchors) and the GT box matched to
            each anchor.
        """
        anchors = cat(mlvl_anchors, dim=0)  # (R, 4) across all levels
        gt_boxes = [x['boxes'] for x in targets]
        image_sizes = [x['img_shape'] for x in img_metas]
        # Free references early; only boxes/sizes are needed below.
        del img_metas
        del targets

        gt_labels = []
        matched_gt_boxes = []
        for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes):
            # (num_gt, R) IoU matrix; can be large, hence the del below.
            match_quality_matrix = ops.box_iou(gt_boxes_i, anchors)
            matched_idxs, gt_labels_i = self.matcher(match_quality_matrix)
            gt_labels_i = gt_labels_i.to(gt_boxes_i.device)
            del match_quality_matrix
            # sample labels
            gt_labels_i = self._subsample_labels(gt_labels_i)
            # NOTE(review): no guard for images with zero GT boxes -- indexing
            # an empty gt_boxes_i with matched_idxs would fail; verify the
            # dataloader never yields empty targets, or that Matcher handles it.
            matched_gt_boxes_i = gt_boxes_i[matched_idxs]

            gt_labels.append(gt_labels_i)
            matched_gt_boxes.append(matched_gt_boxes_i)
        return gt_labels, matched_gt_boxes

    def _subsample_labels(self, label):
        """Randomly subsample positives/negatives; mark the rest as ignored.

        Args:
            label (Tensor): (R,) anchor labels from the matcher.

        Returns:
            Tensor: same tensor, rewritten in place -- sampled positives = 1,
            sampled negatives = 0, everything else = -1 (ignored).
        """
        pos_idx, neg_idx = self.sampler(label, bg_label=0)
        # Fill with the ignore label (-1), then set positive and negative labels
        label.fill_(-1)
        label.scatter_(0, pos_idx, 1)
        label.scatter_(0, neg_idx, 0)
        return label
