import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init

from mmdet.models import HEADS, build_loss
from mmdet.models.dense_heads.base_dense_head import BaseDenseHead
from mmdet.models.utils import gaussian_radius, gen_gaussian_target


@HEADS.register_module()
class CenterHead(BaseDenseHead):
    """Head of CenterNet: Objects as Points.

    Three parallel convolutional branches predict, for every cell of the
    input feature map, a per-class center heatmap, a class-agnostic box
    size (width, height) and a sub-pixel center offset.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        train_cfg (dict, optional): Training config. Must provide
            ``gaussian_iou``, the minimum overlap used to derive the
            gaussian radius of the heatmap targets.
        test_cfg (dict, optional): Testing config.
        loss_heatmap (dict): Config of the center heatmap loss.
        loss_wh_size (dict): Config of the box size regression loss.
        loss_ct_off (dict): Config of the center offset regression loss.
    """
    def __init__(self,
                 num_classes,
                 in_channels,
                 train_cfg=None,
                 test_cfg=None,
                 loss_heatmap=dict(type="GaussianFocalLoss",
                                   alpha=2.0,
                                   gamma=4.0,
                                   loss_weight=1),
                 loss_wh_size=dict(type="L1Loss",
                                   reduction="mean",
                                   loss_weight=1),
                 loss_ct_off=dict(type="L1Loss",
                                  reduction="mean",
                                  loss_weight=1)):
        super().__init__()
        self.num_classes = num_classes
        self.loss_heatmap = build_loss(loss_heatmap)
        self.loss_wh_size = build_loss(loss_wh_size)
        self.loss_ct_off = build_loss(loss_ct_off)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        def _build_branch(out_channels):
            # All three branches share the same 3x3 conv -> ReLU -> 1x1
            # conv prediction structure; only the output width differs.
            # Module order is identical to building each Sequential by
            # hand, so state-dict keys are unchanged.
            return nn.Sequential(
                nn.Conv2d(in_channels, 64, kernel_size=3, stride=1,
                          padding=1), nn.ReLU(inplace=True),
                nn.Conv2d(64, out_channels, kernel_size=1))

        self.heatmap = _build_branch(num_classes)
        self.wh_size = _build_branch(2)
        self.ct_off = _build_branch(2)

    def init_weights(self):
        """Initialize conv weights; bias the heatmap toward background.

        The final heatmap bias of -2.19 ~= -log((1 - 0.1) / 0.1) makes
        the initial post-sigmoid center scores ~0.1, which keeps the
        focal loss stable at the start of training.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution="uniform")
        self.heatmap[-1].bias.data.fill_(-2.19)

    def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape) -> dict:
        """Generate center targets.

        Args:
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                each has shape (num_gt, 4).
            gt_labels (list[Tensor]): Ground truth labels of each box,
                each has shape (num_gt,).
            feat_shape (list[int]): Shape of output feature,
                [batch, channel, height, width].
            img_shape (list[int]): Shape of input image,
                [height, width, channel].

        Returns:
            dict: Ground truth heatmap, box size, center offset.
            Containing the following keys:

                - heatmap (Tensor): Ground truth center heatmap
                - wh_size (Tensor): Ground truth box size
                - ct_off (Tensor): Ground truth center offset
        """
        batch_size, _, height, width = feat_shape
        img_h, img_w = img_shape[:2]

        # Ratios that map image-plane coordinates onto the (smaller)
        # feature map.
        width_ratio = float(width / img_w)
        height_ratio = float(height / img_h)

        heatmap = gt_bboxes[-1].new_zeros(
            [batch_size, self.num_classes, height, width])
        wh_size = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
        ct_off = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])

        gaussian_iou = self.train_cfg.gaussian_iou
        for batch_id in range(batch_size):
            for box_id in range(len(gt_labels[batch_id])):
                left, top, right, bottom = gt_bboxes[batch_id][box_id]
                center_x = (left + right) / 2.0
                center_y = (top + bottom) / 2.0
                label = gt_labels[batch_id][box_id]

                # Use coords in the feature level to generate ground truth
                scale_left = left * width_ratio
                scale_right = right * width_ratio
                scale_top = top * height_ratio
                scale_bottom = bottom * height_ratio
                scale_center_x = center_x * width_ratio
                scale_center_y = center_y * height_ratio

                # Int coords on feature map/ground truth tensor, clamped
                # into the valid index range. The lower clamp guards
                # against degenerate boxes touching the image border
                # producing a negative (wrap-around) index.
                center_x_idx = min(max(0, int(scale_center_x)), width - 1)
                center_y_idx = min(max(0, int(scale_center_y)), height - 1)

                # Generate gaussian heatmap
                scale_box_width = math.ceil(scale_right - scale_left)
                scale_box_height = math.ceil(scale_bottom - scale_top)
                radius = gaussian_radius((scale_box_height, scale_box_width),
                                         min_overlap=gaussian_iou)
                radius = max(0, int(radius))
                heatmap[batch_id, label] = gen_gaussian_target(
                    heatmap[batch_id, label], [center_x_idx, center_y_idx],
                    radius)

                # Generate size target (in feature-map units)
                wh_size[batch_id, 0, center_y_idx, center_x_idx] = \
                    scale_right - scale_left
                wh_size[batch_id, 1, center_y_idx, center_x_idx] = \
                    scale_bottom - scale_top

                # Generate center offset: the sub-pixel remainder lost by
                # snapping the center to an integer cell.
                ct_off[batch_id, 0, center_y_idx, center_x_idx] = \
                    scale_center_x - center_x_idx
                ct_off[batch_id, 1, center_y_idx, center_x_idx] = \
                    scale_center_y - center_y_idx

        target_result = dict(heatmap=heatmap, wh_size=wh_size, ct_off=ct_off)
        return target_result

    def forward(self, features):
        """Forward a single-level feature map.

        Args:
            features (Tensor): Input feature, shape (N, C, H, W).

        Returns:
            tuple[Tensor]: Heatmap logits (N, num_classes, H, W),
            box sizes (N, 2, H, W) and center offsets (N, 2, H, W).
        """
        heatmap = self.heatmap(features)
        wh_size = self.wh_size(features)
        ct_off = self.ct_off(features)
        return heatmap, wh_size, ct_off

    def loss(self,
             heatmap,
             wh_size,
             ct_off,
             gt_boxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute heatmap, size and offset losses against the targets.

        Args:
            heatmap (Tensor): Heatmap logits, shape (N, num_classes, H, W).
            wh_size (Tensor): Predicted box sizes, shape (N, 2, H, W).
            ct_off (Tensor): Predicted center offsets, shape (N, 2, H, W).
            gt_boxes (list[Tensor]): Ground truth bboxes per image.
            gt_labels (list[Tensor]): Ground truth labels per image.
            img_metas (list[dict]): Image meta info; ``pad_shape`` is used.
            gt_bboxes_ignore (None): Unused, kept for API compatibility.

        Returns:
            dict[str, Tensor]: heatmap_loss, wh_size_loss, ct_off_loss.
        """
        targets = self.get_targets(gt_boxes, gt_labels, heatmap.shape,
                                   img_metas[0]['pad_shape'])
        gt_heatmap = targets["heatmap"]
        gt_wh_size = targets["wh_size"]
        gt_ct_off = targets["ct_off"]

        # Normalize by the number of gaussian peaks (real object centers);
        # clamp to 1 so an image without objects cannot divide by zero.
        avg_factor = max(1, gt_heatmap.eq(1).sum())
        heatmap_loss = self.loss_heatmap(heatmap.sigmoid(),
                                         gt_heatmap,
                                         avg_factor=avg_factor)
        # We compute the size and offset loss
        #   only at the real positive position.
        # The value of real positive would be 1 in heatmap ground truth.
        # The mask is computed in class agnostic mode and its shape is
        # (batch, 1, height, width).
        mask = gt_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(gt_heatmap)
        wh_size_loss = self.loss_wh_size(wh_size,
                                         gt_wh_size,
                                         weight=mask,
                                         avg_factor=avg_factor)
        ct_off_loss = self.loss_ct_off(ct_off,
                                       gt_ct_off,
                                       weight=mask,
                                       avg_factor=avg_factor)
        loss_dict = dict(heatmap_loss=heatmap_loss,
                         wh_size_loss=wh_size_loss,
                         ct_off_loss=ct_off_loss)
        return loss_dict

    def get_bboxes(self, heatmap, wh_size, ct_off, img_metas, rescale=None):
        """Transform network output for a batch into bbox predictions.

        Args:
            heatmap (Tensor): object center score for each class
                Has shape (N, num_classes, H, W)
            wh_size (Tensor): object size, class agnostic
                Has shape (N, 2, H, W)
            ct_off (Tensor): object centernet offset, class agnostic
                Has shape (N, 2, H, W)
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            rescale (bool, optional): If true,
                return boxes in original image space

        Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
                The first item is an (n, 5) tensor, where the first 4 columns \
                are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
                5-th column is a score between 0 and 1. The second item is a \
                (n,) tensor where each item is the predicted class label of \
                the corresponding box.
        """
        assert len(heatmap) == len(img_metas)
        result_list = list()
        for i in range(len(img_metas)):
            det_bboxes, det_labels = self._get_bboxes_single(
                heatmap[i], wh_size[i], ct_off[i])
            if rescale:
                scale_factor = img_metas[i]["scale_factor"]
                # Undo test-time resizing; the score column is untouched.
                det_bboxes[:, :-1] /= det_bboxes.new_tensor(scale_factor)
            result_list.append((det_bboxes, det_labels))
        return result_list

    @staticmethod
    def _nms(heatmap):
        """Keep only local maxima: cells equal to their 3x3 neighborhood
        max survive, everything else is zeroed (pooling-based NMS)."""
        hmax = F.max_pool2d(heatmap, kernel_size=3, stride=1, padding=1)
        keep = heatmap == hmax
        return keep.float() * heatmap

    def _get_bboxes_single(self,
                           heatmap,
                           wh_size,
                           ct_off,
                           topk=10,
                           threshold=0.3,
                           stride=4):
        """Decode a single image's predictions into boxes.

        Args:
            heatmap (Tensor): Heatmap logits, shape (num_classes, H, W).
            wh_size (Tensor): Box sizes, shape (2, H, W).
            ct_off (Tensor): Center offsets, shape (2, H, W).
            topk (int): Max detections kept per class.
            threshold (float): Score threshold applied after sigmoid.
            stride (int): Feature-map-to-image scale factor.

        Returns:
            tuple[Tensor, Tensor]: (n, 5) boxes with scores, (n,) labels.
        """
        c, h, w = heatmap.shape
        heatmap = torch.sigmoid(heatmap)
        heatmap = self._nms(heatmap)

        # Guard: a tiny feature map may have fewer than `topk` cells.
        values, _ = torch.topk(heatmap.view(c, -1), k=min(topk, h * w))
        # Per-class k-th largest score, broadcastable over (c, h, w).
        last_topk_value = values[:, -1].view(c, 1, 1)
        topk_keep = heatmap.ge(last_topk_value)  # each class has top k value
        thres_keep = heatmap.gt(threshold)  # threshold for all classes
        keep = topk_keep & thres_keep

        # `keep` is already boolean; torch.where yields (class, y, x) triples.
        det_labels, c_y, c_x = torch.where(keep)
        scores = torch.unsqueeze(heatmap[det_labels, c_y, c_x], dim=1)
        wh = wh_size[:, c_y, c_x]
        off = ct_off[:, c_y, c_x]
        # Recover sub-pixel centers, then scale back to image coordinates.
        c_x = c_x + off[0]
        c_y = c_y + off[1]
        x1 = torch.unsqueeze((c_x - wh[0] / 2) * stride, dim=1)
        y1 = torch.unsqueeze((c_y - wh[1] / 2) * stride, dim=1)
        x2 = torch.unsqueeze((c_x + wh[0] / 2) * stride, dim=1)
        y2 = torch.unsqueeze((c_y + wh[1] / 2) * stride, dim=1)
        det_bboxes = torch.cat([x1, y1, x2, y2, scores], dim=1)
        return det_bboxes, det_labels
