import torch
from torch import nn
import torchvision.models._utils as _utils
from .backone import get_backone 
from .necks import FPN
from .head import CenternetHead    
from .loss import HeatmapFocalLoss, GIouLoss   


def gather_feature(fmap, index, mask=None, use_transform=False):
    """Gather feature vectors from ``fmap`` along the spatial dimension.

    Args:
        fmap: feature tensor, either (N, HxW, C) or, when
            ``use_transform`` is True, raw (N, C, H, W).
        index: (N, K) int64 tensor of flattened spatial positions to pick.
        mask: optional (N, K) boolean tensor; when given, only the masked
            rows are kept and the result is flattened to (-1, C).
        use_transform: reshape a (N, C, H, W) map to (N, HxW, C) first.

    Returns:
        (N, K, C) gathered features, or (-1, C) when ``mask`` is given.
    """
    if use_transform:
        # (N, C, H, W) -> (N, HxW, C) so positions sit on dim 1
        n, c = fmap.shape[:2]
        fmap = fmap.view(n, c, -1).permute(0, 2, 1).contiguous()

    feat_dim = fmap.size(-1)
    idx = index.unsqueeze(-1).expand(*index.shape, feat_dim)
    gathered = fmap.gather(dim=1, index=idx)
    if mask is None:
        return gathered
    # note: mask branch is not exercised by the Res18-dcn COCO config
    keep = mask.unsqueeze(2).expand_as(gathered)
    return gathered[keep].reshape(-1, feat_dim)


class CenterNetDecoder(nn.Module):
    """Decode CenterNet heatmap + per-pixel box offsets into detections.

    Args:
        topk: maximum number of detections to keep per image.
        scale: output-stride multiplier mapping feature-map coordinates
            back to input-image coordinates.
    """

    def __init__(self, topk=100, scale=4.0):
        super(CenterNetDecoder, self).__init__()
        self._topk = topk
        self._scale = scale

    def topk_score(self, fmap, K):
        """Select the top-K peaks of a (N, C, H, W) heatmap across all classes.

        Returns:
            (scores, flat_indices, class_ids, ys, xs), each of shape (N, K).
        """
        batch, channel, height, width = fmap.shape

        # per-class top-K over flattened spatial positions
        topk_scores, topk_inds = torch.topk(fmap.reshape(batch, channel, -1), K)
        topk_inds = topk_inds % (height * width)
        # BUG FIX: the original used float true-division `(inds / width).int()`,
        # which promotes int64 indices to float32 and loses precision once
        # height*width exceeds 2**24; integer floor division is exact.
        topk_ys = (topk_inds // width).float()
        topk_xs = (topk_inds % width).float()

        # global top-K across the (C x K) per-class candidates
        topk_score, index = torch.topk(topk_scores.reshape(batch, -1), K)
        # candidates are grouped per class in chunks of K, so `index // K`
        # recovers the class id (same fix as above, was `(index / K).int()`)
        topk_clses = (index // K).int()
        topk_inds = gather_feature(topk_inds.view(batch, -1, 1), index).reshape(batch, K)
        topk_ys = gather_feature(topk_ys.reshape(batch, -1, 1), index).reshape(batch, K)
        topk_xs = gather_feature(topk_xs.reshape(batch, -1, 1), index).reshape(batch, K)

        return topk_score, topk_inds, topk_clses, topk_ys, topk_xs

    def forward(self, fmap, wh):
        """Decode heatmap `fmap` (N, C, H, W) and box map `wh` (N, 4, H, W).

        Returns:
            (bboxes, scores, classes) with shapes
            (N, K, 4) in input-image pixels, (N, K, 1), (N, K, 1).
        """
        batch, channel, height, width = fmap.shape
        # never ask topk for more entries than spatial positions exist
        K = min(self._topk, height * width)

        scores, index, clses, ys, xs = self.topk_score(fmap, K=K)

        # fetch the 4 box offsets (l, t, r, b) at each selected peak
        wh = gather_feature(wh, index, use_transform=True)
        wh = wh.view(batch, K, 4)

        clses = clses.reshape(batch, K, 1).float()
        scores = scores.reshape(batch, K, 1)

        xs = xs.view(batch, K, 1)
        ys = ys.view(batch, K, 1)
        # box = center -/+ the (left, top, right, bottom) offsets
        bboxes = torch.cat([xs - wh[..., 0:1], ys - wh[..., 1:2],
            xs + wh[..., 2:3], ys + wh[..., 3:4]], dim=2)

        # scale feature-map coordinates back to input-image coordinates
        detections = (bboxes * self._scale, scores, clses)
        return detections




class CenterNet(nn.Module):
    """CenterNet detector: backbone (+FPN/head for resnet variants),
    heatmap/box decoding for inference, and loss computation for training.

    Args:
        cfg: project config node; reads MODEL.NAME, MODEL.PRETRAIN,
            MODEL.RETURN_LAYERS, MODEL.FPN_CHANNELS, MODEL.NUM_CLASSES,
            TRAIN.HEATMAP_WEIGHT and TRAIN.WH_WEIGHT.
        topk: max detections per image (forwarded to the decoder).
        scale: feature-map -> image coordinate scale (forwarded to decoder).
        flip_test: average heatmaps of the image and its horizontal flip
            at inference time.
    """

    def __init__(self, cfg, topk=100, scale=4.0, flip_test=False):
        super(CenterNet, self).__init__()
        if 'res' in cfg.MODEL.NAME:
            backbone = get_backone(cfg.MODEL.NAME, pretrained=cfg.MODEL.PRETRAIN)
            return_layers = {k: v for v, k in enumerate(cfg.MODEL.RETURN_LAYERS)}
            self.body = _utils.IntermediateLayerGetter(backbone, return_layers)
            self.upsample = FPN(cfg.MODEL.FPN_CHANNELS, 64)
            self.head = CenternetHead(head_conv=64, num_classes=cfg.MODEL.NUM_CLASSES)
        elif 'dla' in cfg.MODEL.NAME:
            # DLA backbones produce the 'cls'/'wh' heads themselves
            self.body = get_backone(cfg.MODEL.NAME, num_layers=34, heads={'cls': 1, 'wh': 4})

        self.flip_test = flip_test
        # 3x3 max-pool acts as NMS on heatmap peaks (keep local maxima only)
        self.heatmap_nms = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.decoder = CenterNetDecoder(topk=topk, scale=scale)
        self.hm_loss = HeatmapFocalLoss(weight=cfg.TRAIN.HEATMAP_WEIGHT)
        self.wh_loss = GIouLoss(weight=cfg.TRAIN.WH_WEIGHT)
        self.cfg = cfg

    def _network_forward(self, x):
        """Run the backbone (and FPN/head for resnet variants) and return
        the raw head dict with a clamped 'cls' heatmap."""
        y = self.body(x)
        if 'res' in self.cfg.MODEL.NAME:
            y = self.upsample(y)[0]
            y = self.head(y)
        # clamp keeps the focal loss log() terms finite
        y['cls'] = torch.clamp(y['cls'], 1e-4, 1 - 1e-4)
        return y

    def forward(self, x):
        """Training: return raw head dict. Inference: return decoded
        (bboxes, scores, classes) detections."""
        y = self._network_forward(x)
        if self.training:
            return y
        if self.flip_test:
            # BUG FIX: the original called self.base_network(...)[0], an
            # attribute that is never defined (AttributeError at runtime);
            # reuse the same forward path on the horizontally flipped input.
            y_flip = self._network_forward(x.flip(dims=[3]))
            y['cls'] = (y['cls'] + y_flip['cls'].flip(dims=[3])) * 0.5
        heatmap = y['cls']
        # keep only local maxima (maxpool-based NMS)
        keep = heatmap.eq(self.heatmap_nms(heatmap)).float()
        results = self.decoder(keep * heatmap, y['wh'])
        return results

    def loss_calc(self,
            pred_hm,
            pred_wh,
            heatmap,
            wh_target,
            wh_weight):
        """Compute (heatmap focal loss, GIoU box loss).

        Args:
            pred_hm: predicted heatmap, (N, C, H, W).
            pred_wh: predicted per-pixel (l, t, r, b) offsets, (N, 4, H, W).
            heatmap: target heatmap, same shape as pred_hm.
            wh_target: target box map, (N, 4, H, W).
            wh_weight: per-pixel box-loss weights; viewed as (-1, H, W).
        """
        H, W = pred_hm.shape[2:]
        hm_loss = self.hm_loss(pred_hm, heatmap)

        mask = wh_weight.view(-1, H, W)
        avg_factor = mask.sum() + 1e-4

        # per-pixel (x, y) coordinate grid used as box anchor locations
        shifts_x = torch.arange(0, W, dtype=torch.float32, device=heatmap.device)
        shifts_y = torch.arange(0, H, dtype=torch.float32, device=heatmap.device)
        # NOTE(review): relies on the legacy 'ij' default of torch.meshgrid;
        # newer torch warns here — confirm torch version before adding
        # indexing='ij' (kwarg only exists in torch >= 1.10).
        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
        base_loc = torch.stack((shift_x, shift_y), dim=0)  # (2, h, w)

        # decode predictions to corner boxes, (batch, h, w, 4)
        pred_boxes = torch.cat((base_loc - pred_wh[:, [0, 1]],
                base_loc + pred_wh[:, [2, 3]]), dim=1).permute(0, 2, 3, 1)
        # targets to the same layout, (batch, h, w, 4)
        boxes = wh_target.permute(0, 2, 3, 1)
        wh_loss = self.wh_loss(pred_boxes, boxes, mask, avg_factor=avg_factor)

        return hm_loss, wh_loss
    
