import math
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F
from detection.layers import cat, losses, smooth_l1_loss
from detection.modeling.roi_heads.mask_head import (ConvUpSampleMaskHead,
                                                    mask_rcnn_loss)
from detection.modeling.utils import Sampler, BoxCoder, Matcher, batched_nms
from torchvision import models, ops
from torchvision.ops import boxes as box_ops

from .bbox_preditor import BOX_PREDICTORS


def select_foreground_proposals(proposals, labels):
    """Keep only the foreground (label > 0) entries of each image's proposals.

    Args:
        proposals: per-image proposal tensors.
        labels: per-image 1-D label tensors aligned with ``proposals``;
            0 denotes background, positive values denote foreground classes.

    Returns:
        tuple: ``(fg_proposals, fg_select_masks)`` — per-image foreground
        proposals and the boolean masks that selected them.
    """
    fg_proposals = []
    fg_select_masks = []
    # NOTE: the original used enumerate() but never consumed the index.
    for proposals_per_img, label_per_img in zip(proposals, labels):
        fg_mask = label_per_img > 0
        # as_tuple=False avoids the deprecation warning of bare .nonzero()
        fg_idxs = fg_mask.nonzero(as_tuple=False).squeeze(1)
        fg_proposals.append(proposals_per_img[fg_idxs])
        fg_select_masks.append(fg_mask)
    return fg_proposals, fg_select_masks


class BoxHead(nn.Module):
    """Fast R-CNN box head with an optional mask branch.

    Training: labels/samples RPN proposals against ground truth, extracts
    multi-level RoI features, and computes classification + box-regression
    losses.  Inference: decodes box deltas, filters by score, applies
    class-wise NMS, and optionally predicts instance masks.
    """

    def __init__(self, cfg, in_channels=2048):
        super().__init__()
        # fmt:off
        batch_size           = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
        score_thresh         = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
        nms_thresh           = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
        detections_per_img   = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG

        box_predictor        = cfg.MODEL.ROI_BOX_HEAD.BOX_PREDICTOR
        num_classes          = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        spatial_scale        = cfg.MODEL.ROI_BOX_HEAD.POOL_SPATIAL_SCALE
        pool_size            = cfg.MODEL.ROI_BOX_HEAD.POOL_RESOLUTION
        pool_type            = cfg.MODEL.ROI_BOX_HEAD.POOL_TYPE
        mask_on              = cfg.MODEL.MASK_ON
        # fmt:on

        self.score_thresh = score_thresh
        self.nms_thresh = nms_thresh
        self.detections_per_img = detections_per_img
        self.spatial_scale = spatial_scale  # list of per-level scale strings (eval'd below)
        self.pool_size = pool_size
        self.mask_on = mask_on
        self.num_classes = num_classes
        self.proposal_append_gt = True
        self.sample_threshold = [0.5, ]
        self.box_reg_loss_type = "smooth_l1"

        # One pooling callable per feature-pyramid level.
        # NOTE(review): the config stores spatial scales as strings (e.g. '1/4')
        # that are eval()'d here; config files are assumed to be trusted input.
        pooler = []
        if pool_type == 'align':
            for spatial in self.spatial_scale:
                pooler.append(partial(ops.roi_align, output_size=(pool_size, pool_size), spatial_scale=eval(spatial), sampling_ratio=0, aligned=True))
        elif pool_type == 'pooling':
            for spatial in self.spatial_scale:
                pooler.append(partial(ops.roi_pool, output_size=(pool_size, pool_size), spatial_scale=eval(spatial)))
        else:
            raise ValueError('Unknown pool type {}'.format(pool_type))
        self.pooler = pooler

        self.box_predictor = BOX_PREDICTORS[box_predictor](cfg, in_channels)
        self.box_coder = BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))       # weights=(10.0, 10.0, 5.0, 5.0)
        self.matcher = Matcher(self.sample_threshold, labels=[0, 1], allow_low_quality_matches=False)
        self.fg_bg_sampler = Sampler(batch_size, 0.25)

        if mask_on:
            self.mask_head = ConvUpSampleMaskHead(in_channels, num_classes=cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES)

    def forward(self, features, proposals, img_metas, targets=None, is_da=False):
        """Run the box head.

        Args:
            features: list of feature maps (finest to coarsest).
            proposals: per-image proposal tensors (Nx5, boxes + objectness),
                or the dicts produced by ``label_and_sample_proposals``.
            img_metas: per-image metadata dicts (must contain 'img_shape').
            targets: ground-truth dicts ('boxes', 'labels'); training only.
            is_da: domain-adaptation mode — skip loss/detection and return
                pooled features instead.

        Returns:
            tuple: (dets, loss, sampled_proposals, box_features, roi_features).
        """
        if self.training and targets is not None and not is_da:
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets

        box_features = []
        roi_features = []
        sampled_proposals = []
        # Collect the sampled boxes for the caller; only populated in training.
        for p in proposals:
            if not self.training:
                continue
            elif is_da:
                sampled_proposals.append(p[:, :4])
            else:
                sampled_proposals.append(p['proposal_boxes'])

        if self.training and not is_da:
            loss, box_features, roi_features = self._forward_box(features, proposals)
            dets = []
        else:
            loss = {}
            if is_da:
                dets = []
                box_features, roi_features = self._forward_box_inference(features, proposals, img_metas, is_da)
            else:
                dets = self._forward_box_inference(features, proposals, img_metas)

            if self.mask_on:
                dets = self.forward_mask(features, dets)

        return dets, loss, sampled_proposals, box_features, roi_features

    def _forward_box(self, features, proposals, img_metas=None):
        """Training path: pool RoI features, predict, and compute RCNN losses.

        Returns:
            tuple: (loss dict, pooled features, raw RoI features).
        """
        # Only use as many pyramid levels as we have poolers for.
        if len(self.pooler) < len(features):
            num_pooler = len(self.pooler)
            features = features[:num_pooler]
            assert len(features) == len(self.pooler)
        roi_features = self.roi_extractor(features, [x['proposal_boxes'] for x in proposals])
        predictions = self.box_predictor(roi_features)
        # The predictor appends its pooled features as the last element.
        pooled_features = predictions[-1]
        predictions = predictions[:-1]

        rcnn_losses = self.fastrcnn_loss(predictions, proposals)
        return rcnn_losses, pooled_features, roi_features

    def _forward_box_inference(self, features, proposals, img_metas=None, is_da=False):
        """Inference path.

        Returns:
            ``(pooled_features, roi_features)`` when ``is_da`` is True,
            otherwise the list of per-image detection dicts.
        """
        if len(self.pooler) < len(features):
            num_pooler = len(self.pooler)
            features = features[:num_pooler]
            assert len(features) == len(self.pooler)
        proposals = [x[:, :4] for x in proposals]
        roi_features = self.roi_extractor(features, proposals)
        predictions = self.box_predictor(roi_features, is_da=is_da)
        pooled_features = predictions[-1]
        predictions = predictions[:-1]
        if is_da:
            return pooled_features, roi_features
        del roi_features

        dets = self.inference(predictions, proposals, img_metas)
        return dets

    def label_and_sample_proposals(self, proposals, targets):
        """Match proposals to ground truth and sample a fg/bg minibatch.

        Returns:
            list of per-image dicts with keys 'proposal_boxes',
            'objectness_logits', 'gt_classes', 'gt_boxes'.
        """
        if self.proposal_append_gt:
            proposals = self.add_ground_truth_to_proposals(targets, proposals)

        proposals_with_gt = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = box_ops.box_iou(
                targets_per_image['boxes'], proposals_per_image[:, :4]
                )
            matched_idxs, matched_labels = self.matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(matched_idxs, matched_labels, targets_per_image['labels'])
            proposals_per_image = proposals_per_image[sampled_idxs]
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                gt_boxes = targets_per_image['boxes'][sampled_targets]
            else:
                # BUGFIX: gt_boxes was previously unbound (NameError on the
                # first image) or silently stale from the previous loop
                # iteration when an image had no ground truth.  Use zero
                # boxes: all samples are background here, so these values
                # never contribute to the regression loss.
                gt_boxes = proposals_per_image.new_zeros((len(sampled_idxs), 4))

            proposals_with_gt_i = {}
            proposals_with_gt_i['proposal_boxes'] = proposals_per_image[:, :4]
            proposals_with_gt_i['objectness_logits'] = proposals_per_image[:, -1]
            proposals_with_gt_i['gt_classes'] = gt_classes
            proposals_with_gt_i['gt_boxes'] = gt_boxes
            proposals_with_gt.append(proposals_with_gt_i)

        return proposals_with_gt

    def _sample_proposals(self, matched_idxs, matched_labels, gt_classes):
        """Assign class labels to matches and sample fg/bg indices.

        Background samples get label ``self.num_classes``; ignored samples
        get -1.  Returns (sampled indices, their class labels).
        """
        has_gt = gt_classes.numel() > 0
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            gt_classes[matched_labels == 0] = self.num_classes
            gt_classes[matched_labels == -1] = -1
        else:
            # No ground truth: everything is background.
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes

        sampled_fg_idxs, sampled_bg_idxs = self.fg_bg_sampler(gt_classes, bg_label=self.num_classes)
        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return sampled_idxs, gt_classes[sampled_idxs]

    def add_ground_truth_to_proposals(self, gt, proposals):
        """Append ground-truth boxes (with near-1 objectness) to proposals."""
        assert gt is not None

        if len(proposals) != len(gt):
            raise ValueError("proposals and gt should have the same length as the number of images!")
        if len(proposals) == 0:
            return proposals

        device = proposals[0].device
        # Logit of probability ~1, so GT boxes rank highest in objectness.
        gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
        results = []
        for gt_i, proposals_i in zip(gt, proposals):
            gt_boxes = gt_i['boxes']
            gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device)
            gt_proposal = cat((gt_boxes, gt_logits.view(-1, 1)), dim=1)
            new_proposals = cat((proposals_i, gt_proposal), dim=0)
            results.append(new_proposals)

        return results

    def roi_extractor(self, features, proposals, finest_scale=56):
        """Pool RoI features, assigning each RoI to a pyramid level by scale.

        Args:
            features: per-level feature maps (one per pooler).
            proposals: per-image Nx4 box tensors.
            finest_scale: box scale mapped to the finest level (FPN-style
                ``floor(log2(sqrt(area) / finest_scale))`` assignment).
        """
        num_level = len(self.pooler)
        features = features[:num_level]

        # Prefix each box with its batch-image index to form Nx5 rois.
        rois_list = []
        for img_id, bboxes in enumerate(proposals):
            img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
            lvl_bbox = torch.cat([img_inds, bboxes], dim=-1)
            rois_list.append(lvl_bbox)

        rois = torch.cat(rois_list, dim=0)

        # single level
        if num_level == 1:
            roi_feats = self.pooler[0](features[0], rois)
            return roi_feats

        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2])
        )
        target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_level-1).long()

        roi_feats = features[0].new_zeros(rois.size(0), features[0].size(1), self.pool_size, self.pool_size)
        for i in range(num_level):
            mask = target_lvls == i
            inds = mask.nonzero(as_tuple=False).squeeze(1)
            if inds.numel() > 0:
                rois_ = rois[inds]
                roi_feats_t = self.pooler[i](features[i], rois_)
                roi_feats[inds] = roi_feats_t
            else:
                # ensure each feature pyramid is included in the computation graph to avoid runtime bugs.
                roi_feats += sum(x.view(-1)[0] for x in self.parameters()) * 0. + features[i].sum() * 0.

        return roi_feats

    def forward_mask(self, features, proposals, masks=None, labels=None):
        """Mask branch: loss in training, per-instance masks at inference.

        NOTE(review): both calls below pass ``self.spatial_scale`` — a list of
        scale strings — directly to ``ops.roi_align``, which expects a single
        float.  This looks like a latent bug on the mask path; confirm against
        the configs that enable MASK_ON before changing it.
        """
        if self.training:
            # ``masks``/``labels`` must be provided by the caller in training.
            fg_proposals, fg_select_masks = select_foreground_proposals(proposals, labels)
            gt_masks = []
            fg_labels = []
            for m, masks_per_img, label_per_img in zip(fg_select_masks, masks, labels):
                gt_masks.append(masks_per_img[m])
                fg_labels.append(label_per_img[m])

            pooled_features = ops.roi_align(features, proposals,
                                            output_size=(14, 14),
                                            spatial_scale=self.spatial_scale,
                                            sampling_ratio=2)
            mask_features = pooled_features[cat(fg_select_masks, dim=0)]
            mask_logits = self.mask_head(mask_features)
            del pooled_features
            mask_loss = mask_rcnn_loss(mask_logits, gt_masks, fg_proposals, fg_labels)
            loss_dict = {'mask_loss': mask_loss}
            return loss_dict
        else:
            detections = proposals
            proposals = [det['boxes'] for det in detections]
            pooled_features = ops.roi_align(features, proposals,
                                            output_size=(14, 14),
                                            spatial_scale=self.spatial_scale,
                                            sampling_ratio=2)

            mask_logits = self.mask_head(pooled_features)
            detections = self.mask_inference(mask_logits, detections)
            return detections

    def fastrcnn_loss(self, predictions, proposals):
        """Compute Fast R-CNN classification + box regression losses.

        Args:
            predictions: (scores, proposal_deltas) from the box predictor.
            proposals: dicts from ``label_and_sample_proposals``.
        """
        scores, proposal_deltas = predictions

        # parse classification outputs
        gt_classes = cat([p['gt_classes'] for p in proposals], dim=0)

        # parse box regression outputs
        if len(proposals):
            proposal_boxes = cat([p['proposal_boxes'] for p in proposals], dim=0)  # N x 4
            assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"

            gt_boxes = cat([p['gt_boxes'] for p in proposals], dim=0)
        else:
            proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
        # classification loss
        loss_cls = F.cross_entropy(scores, gt_classes, reduction="mean")
        # box regression loss (foreground samples only)
        loss_box_reg = self.box_reg_loss(proposal_boxes, gt_boxes, proposal_deltas, gt_classes)

        rcnn_loss = {
            "rcnn_cls_loss": loss_cls,
            "rcnn_reg_loss": loss_box_reg
        }

        return rcnn_loss

    def box_reg_loss(self, proposal_boxes, gt_boxes, proposal_deltas, gt_classes):
        """Smooth-L1 regression loss over foreground samples, normalized by
        the total number of samples (so background still dilutes the loss)."""
        box_dim = proposal_boxes.shape[1]   # 4 or 5
        fg_inds = torch.nonzero((gt_classes >= 0) & (gt_classes < self.num_classes), as_tuple=True)[0]
        if proposal_deltas.shape[1] == box_dim:     # cls-agnostic regression
            fg_pred_deltas = proposal_deltas[fg_inds]
        else:
            # class-specific regression: pick the deltas of the GT class
            fg_pred_deltas = proposal_deltas.view(-1, self.num_classes, box_dim)[fg_inds, gt_classes[fg_inds]]

        # compute loss
        if self.box_reg_loss_type == "smooth_l1":
            gt_pred_deltas = self.box_coder.encode(proposal_boxes[fg_inds], gt_boxes[fg_inds])
            loss_box_reg = smooth_l1_loss(fg_pred_deltas, gt_pred_deltas, beta=1.0, reduction="sum")
        elif self.box_reg_loss_type == "giou":
            raise ValueError(f"current unsupport this loss type '{self.box_reg_loss_type}'")
        else:
            raise ValueError(f"Invalid dense box regression loss type '{self.box_reg_loss_type}'")

        return loss_box_reg / max(gt_classes.numel(), 1.0)

    def mask_inference(self, pred_mask_logits, detections):
        """Attach per-instance mask probabilities to each detection dict."""
        # Select masks corresponding to the predicted classes
        num_boxes_per_image = [len(det['labels']) for det in detections]
        num_masks = pred_mask_logits.shape[0]
        assert sum(num_boxes_per_image) == num_masks

        class_pred = cat([det['labels'] for det in detections])
        indices = torch.arange(num_masks)
        mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid()

        # mask_probs_pred.shape: (B, 1, M, M)
        mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0)

        for prob, det in zip(mask_probs_pred, detections):
            det['masks'] = prob  # (N, 1, M, M)

        return detections

    def inference(self, predictions, proposals, img_metas):
        """Decode predictions into final per-image detections.

        Pipeline per image: drop non-finite rows, drop the background score
        column, clip boxes to the image, score-filter, class-wise NMS, keep
        top ``detections_per_img``.
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        image_shapes = [x['img_shape'] for x in img_metas]

        results = []
        for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes):
            valid_mask = torch.isfinite(boxes_per_image).all(dim=1) & torch.isfinite(scores_per_image).all(dim=1)
            if not valid_mask.all():
                boxes_per_image = boxes_per_image[valid_mask]
                scores_per_image = scores_per_image[valid_mask]

            # Last column is the background class; drop it.
            scores_per_image = scores_per_image[:, :-1]
            num_bbox_reg_classes = boxes_per_image.shape[1] // 4
            boxes_per_image = boxes_per_image.reshape(-1, 4)
            boxes_per_image = box_ops.clip_boxes_to_image(boxes_per_image, image_shape)
            boxes_per_image = boxes_per_image.view(-1, num_bbox_reg_classes, 4)  # R x C x 4

            # 1. Filter results based on detection scores. It can make NMS more efficient
            #    by filtering out low-confidence detections.
            filter_mask = scores_per_image > self.score_thresh
            filter_inds = torch.nonzero(filter_mask)
            if num_bbox_reg_classes == 1:
                boxes_per_image = boxes_per_image[filter_inds[:, 0], 0]
            else:
                boxes_per_image = boxes_per_image[filter_mask]
            scores_per_image = scores_per_image[filter_mask]

            # 2. Apply NMS for each class independently.
            keep = batched_nms(boxes_per_image, scores_per_image, filter_inds[:, 1], self.nms_thresh)
            if self.detections_per_img >= 0:
                keep = keep[:self.detections_per_img]
            boxes_per_image, scores_per_image, filter_inds = boxes_per_image[keep], scores_per_image[keep], filter_inds[keep]
            result = {
                'boxes': boxes_per_image,
                'scores': scores_per_image,
                'labels': filter_inds[:, 1]
            }
            results.append(result)

        return results

    def predict_boxes(self, predictions, proposals):
        """Decode regression deltas into per-image box tensors (Nx(KxB))."""
        if not len(proposals):
            return []
        _, proposal_deltas = predictions
        num_prop_per_image = [len(p) for p in proposals]
        proposal_boxes = cat(proposals, dim=0)
        predict_boxes = self.box_coder.decode(
            proposal_deltas,
            proposal_boxes,
        )  # Nx(KxB)
        return predict_boxes.split(num_prop_per_image)

    def predict_probs(self, predictions, proposals):
        """Softmax class scores split back into per-image tensors."""
        scores, _ = predictions
        num_inst_per_image = [len(p) for p in proposals]
        probs = F.softmax(scores, dim=-1)
        return probs.split(num_inst_per_image, dim=0)
