import torch
from torch import nn
import torch.nn.functional as F
from .backone import get_backone
from .necks import FPN
from .head import CenternetHead
from .loss import HeatmapFocalLoss, GIouLoss, MaskedL1Loss, SmoothL1Loss, WingLoss   
import torchvision.models._utils as _utils
from functools import partial
from lib.data.util import bbox_areas
import numpy as np

def multi_apply(func, *args, **kwargs):
    """Apply `func` element-wise across the zipped `args`, then transpose.

    `func` is expected to return a tuple per call; the result is a tuple of
    lists, one list per returned field (i.e. the per-call results transposed).
    Any `kwargs` are forwarded to every call.
    """
    if kwargs:
        func = partial(func, **kwargs)
    per_item = [func(*items) for items in zip(*args)]
    return tuple(list(field) for field in zip(*per_item))

class CenterNetDecoder(nn.Module):
    """Decode raw CenterNet head outputs into detections.

    Given a class heatmap, per-pixel ltrb box offsets and per-pixel landmark
    offsets (all at feature-map resolution), keeps the top-K heatmap peaks
    after a max-pool pseudo-NMS and maps them back to input-image coordinates.
    """

    def __init__(self, topk=100, down_ratio=4.0):
        """
        Args:
            topk: maximum number of detections kept per image (capped by the
                feature-map area at decode time).
            down_ratio: stride between the input image and the feature map.
        """
        super(CenterNetDecoder, self).__init__()
        self.topk = topk
        self.down_ratio = down_ratio
        # 3x3 max-pool acts as a cheap NMS: only local maxima of the heatmap
        # keep their score; everything else is zeroed in forward().
        self.nms = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)

    def _gather_feature(self, fmap, index, mask=None, use_transform=False):
        """Gather rows of `fmap` along dim 1 at the positions in `index`.

        Args:
            fmap: (N, M, C) tensor, or (N, C, H, W) when `use_transform`.
            index: (N, K) integer indices into dim 1.
            mask: optional boolean mask applied after gathering (unused in
                the current decode path).
            use_transform: reshape (N, C, H, W) -> (N, H*W, C) first so that
                dim 1 indexes flattened spatial positions.

        Returns:
            (N, K, C) gathered features, or (-1, C) when `mask` is given.
        """
        if use_transform:
            batch, channel = fmap.shape[:2]
            fmap = fmap.view(batch, channel, -1).permute((0, 2, 1)).contiguous()
        dim = fmap.size(-1)
        index = index.unsqueeze(len(index.shape)).expand(*index.shape, dim)
        fmap = fmap.gather(dim=1, index=index)
        if mask is not None:
            mask = mask.unsqueeze(2).expand_as(fmap)
            fmap = fmap[mask]
            fmap = fmap.reshape(-1, dim)
        return fmap

    def _topk_score(self, fmap, K):
        """Return (score, flat spatial index, class, y, x) of the K best peaks.

        Takes the top-K per class first, then the top-K over all C*K
        candidates, so `index` below addresses a (C*K,)-shaped candidate list
        grouped per class in chunks of K.
        """
        batch, channel, height, width = fmap.shape

        # top-K per class over the flattened spatial grid
        topk_scores, topk_inds = torch.topk(fmap.reshape(batch, channel, -1), K)
        topk_inds = topk_inds % (height * width)
        topk_ys = (topk_inds // width).int().float()
        topk_xs = (topk_inds % width).int().float()

        # top-K across all classes
        topk_score, index = torch.topk(topk_scores.reshape(batch, -1), K)
        # candidates are grouped per class in chunks of K, hence // K
        topk_clses = (index // K).int()
        topk_inds = self._gather_feature(topk_inds.view(batch, -1, 1), index).reshape(batch, K)
        topk_ys = self._gather_feature(topk_ys.reshape(batch, -1, 1), index).reshape(batch, K)
        topk_xs = self._gather_feature(topk_xs.reshape(batch, -1, 1), index).reshape(batch, K)
        return topk_score, topk_inds, topk_clses, topk_ys, topk_xs

    def forward(self, pred_hm, pred_ltrb, pred_landm):
        """Decode one batch of raw head outputs.

        Args:
            pred_hm: (N, C, H, W) class heatmap.
            pred_ltrb: (N, 4, H, W) left/top/right/bottom offsets in image px.
            pred_landm: (N, 10, H, W) landmark offsets in the signed-log
                encoding produced by target generation.

        Returns:
            (bboxes, scores, landms, clses) with shapes
            (N, K, 4), (N, K, 1), (N, K, 10), (N, K, 1).
        """
        keep = pred_hm.eq(self.nms(pred_hm)).float()
        # FIX: was `pred_hm *= keep`, which mutated the caller's tensor in
        # place; suppress non-maxima out-of-place instead.
        pred_hm = pred_hm * keep
        batch, channel, height, width = pred_hm.shape
        area = height * width
        K = self.topk if self.topk < area else area
        scores, index, clses, ys, xs = self._topk_score(pred_hm, K)
        ltrb = self._gather_feature(pred_ltrb, index, use_transform=True)
        ltrb = ltrb.view(batch, K, 4)

        clses = clses.reshape(batch, K, 1).float()
        scores = scores.reshape(batch, K, 1)

        # peak coordinates back in input-image scale
        xs_ori = xs.view(batch, K, 1) * self.down_ratio
        ys_ori = ys.view(batch, K, 1) * self.down_ratio
        bboxes = torch.cat([xs_ori - ltrb[..., 0:1], ys_ori - ltrb[..., 1:2],
            xs_ori + ltrb[..., 2:3], ys_ori + ltrb[..., 3:4]], dim=2)

        # Invert the signed-log landmark encoding used at training time
        # (see target_single_image): |v| <= 1 -> v*e, v > 1 -> exp(v),
        # v < -1 -> -exp(|v|). The three masked writes are order-sensitive
        # but non-overlapping (exp of |v| > 1 always lands outside [-1, 1]).
        landms = self._gather_feature(pred_landm, index, use_transform=True)
        landms = landms.view(batch, K, 10)
        landms[landms > 1] = landms[landms > 1].exp()
        landms[landms < -1] = -landms[landms < -1].abs().exp()
        landms[landms.abs() <= 1] *= np.e
        # shift by the peak position (feature coords), then scale to image px
        landms[..., ::2] += xs.view(batch, K, 1).expand(batch, K, 5)
        landms[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, 5)
        landms *= self.down_ratio
        detections = (bboxes, scores, landms, clses)
        return detections


class CenterNet(nn.Module):
    """CenterNet-style detector with heatmap, ltrb-box and landmark heads.

    Builds a backbone/neck/head stack from `cfg`, generates training targets
    and losses, and decodes raw head outputs into detections at inference.
    """

    def __init__(self,
                cfg,
                alpha=0.54,
                head_conv=64,
                topk=100,
                down_ratio=4.0,
                flip_test=False):
        """
        Args:
            cfg: config dict; reads the 'model', 'dataset' and 'train'
                sections used below.
            alpha: shrink factor applied to the gaussian radii drawn on
                the target heatmap.
            head_conv: channel width of the head conv layers.
            topk: max detections kept by the decoder.
            down_ratio: stride between the input image and the feature map.
            flip_test: if True, average the heatmap with a horizontally
                flipped forward pass at inference time.
        """
        super(CenterNet, self).__init__()
        self.cfg = cfg
        self.alpha = alpha
        self.down_ratio = down_ratio
        self.flip_test = flip_test
        # lazily-built (2, H, W) grid of cell-center image coordinates,
        # filled in by loss_calc on first use
        self.base_loc = None
        
        self.num_class = cfg['model']['num_classes']
        self.image_size = cfg['dataset']['image_size']
        self.min_face = cfg['dataset']['min_face']
        
        self.decoder = CenterNetDecoder(topk, down_ratio)
        self.hm_loss = HeatmapFocalLoss(weight=cfg['train']['heatmap_weight'])
        self.ltrb_loss = GIouLoss(weight=cfg['train']['ltrb_weight'])
        # self.landms_loss = SmoothL1Loss(weight=cfg['train']['landmark_weight'])
        self.landms_loss = WingLoss(weight=cfg['train']['landmark_weight'], w=2)
        
        # NOTE(review): no `else` branch — an unrecognised model name leaves
        # self.body undefined and only fails later with AttributeError.
        if 'dla' in cfg['model']['name']:
            # NOTE(review): the dla branch declares no landmark head here —
            # confirm the dla backbone emits 'landm' before using flip/decode.
            self.body = nn.Sequential(
                get_backone(cfg['model']['name'], num_layers=34, heads={'cls':1, 'ltrb':4})
            )
        elif 'res' in cfg['model']['name']:
            # map backbone layer names -> output indices for the FPN
            return_layers = {k:v for v, k in enumerate(cfg['model']['return_layers'])}
            backbone = get_backone(cfg['model']['name'], pretrained=cfg['model']['pretrain'])
            self.body = nn.Sequential(
                _utils.IntermediateLayerGetter(backbone, return_layers),
                FPN(cfg['model']['fpn_channels'], 64),
                CenternetHead(head_conv=head_conv, num_classes=cfg['model']['num_classes'])
            )
        elif 'mobilenet' in cfg['model']['name']:
            return_layers = {k:v for v, k in enumerate(cfg['model']['return_layers'])}
            backbone = get_backone(cfg['model']['name'], width_mult=1.0)
            self.body = nn.Sequential(
                _utils.IntermediateLayerGetter(backbone, return_layers),
                FPN(cfg['model']['fpn_channels'], 64, in_size=self.image_size),
                CenternetHead(head_conv=head_conv, num_classes=cfg['model']['num_classes'], landmark=10)
            )
    
    def forward(self, x):
        """Run the network.

        Returns the raw head dict (keys 'cls', 'ltrb', 'landm') while
        training, otherwise decoded detections (bboxes, scores, landms,
        clses) from the decoder.
        """
        y = self.body(x)
        # keep class probabilities strictly inside (0, 1); presumably for
        # numerical stability of the focal-loss logs — TODO confirm
        y['cls'] = torch.clamp(y['cls'], 1e-6, 1 - 1e-6)
        if self.training:
            return y
        if self.flip_test:
            # NOTE: only the heatmap is flip-averaged; 'ltrb' and 'landm'
            # come from the unflipped pass alone.
            y_flip = self.body(x.flip(dims=[3]))
            y['cls'] = (y['cls']+y_flip['cls'].flip(dims=[3]))*0.5
        dets = self.decoder(y['cls'], y['ltrb'], y['landm'])
        return dets
    

    def loss(self, pred_hm, pred_ltrb, pred_landm, targets):
        """Compute the three losses against per-image target tensors.

        Each row of a target tensor is laid out as
        [x1, y1, x2, y2, <landmark coords>, label] per the slicing below.
        """
        gt_bboxes = [t[:, :4] for t in targets]
        gt_landmarks = [t[:, 4:-1] for t in targets]
        gt_labels = [t[:, -1].long() for t in targets]
        all_targets = self.target_generator(gt_bboxes, gt_landmarks, gt_labels)
        hm_loss, ltrb_loss, landm_loss = self.loss_calc(pred_hm, pred_ltrb, pred_landm, *all_targets)
        return hm_loss, ltrb_loss, landm_loss
    
    def loss_calc(self, 
            pred_hm, 
            pred_ltrb, 
            pred_landm,
            hm_gt,
            hm_mask, 
            ltrb_gt, 
            ltrb_weight,
            landm_gt,
            landm_mask):
        """Heatmap focal loss + GIoU box loss + wing landmark loss."""
        H, W = pred_hm.shape[2:]
        hm_loss = self.hm_loss(pred_hm, hm_gt, hm_mask)
        landm_loss = self.landms_loss(pred_landm, landm_gt, landm_mask)

        mask = ltrb_weight.view(-1, H, W)
        # normalise the box loss by the total positive weight
        avg_factor = mask.sum() + 1e-4

        # (Re)build the cached grid of feature-cell centers in image
        # coordinates whenever the feature size changes.
        # NOTE(review): the cache key checks shape only, not device — if the
        # inputs move to a different device after the first call this will
        # use a stale-device grid; verify against the training loop.
        if self.base_loc is None or H != self.base_loc.shape[1] or W != self.base_loc.shape[2]:
            base_step = self.down_ratio
            shifts_x = torch.arange(0, (W - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=hm_gt.device)
            shifts_y = torch.arange(0, (H - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=hm_gt.device)
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            self.base_loc = torch.stack((shift_x, shift_y), dim=0)  # (2, h, w)


        # decode ltrb offsets into absolute corner boxes: (batch, h, w, 4)
        pred_boxes = torch.cat((self.base_loc - pred_ltrb[:, [0, 1]],
                self.base_loc + pred_ltrb[:, [2, 3]]), dim=1).permute(0, 2, 3, 1)
        # ground-truth boxes in the same (batch, h, w, 4) layout
        boxes = ltrb_gt.permute(0, 2, 3, 1)
        ltrb_loss = self.ltrb_loss(pred_boxes, boxes, mask, avg_factor=avg_factor)

        return hm_loss, ltrb_loss, landm_loss


    def target_generator(self, gt_boxes, gt_landmarks, gt_labels):
        """Build batched training targets (no grad), one call per image."""
        with torch.no_grad():
            hm_gt, hm_mask, ltrb_gt, ltrb_weight, landm_gt, landm_mask = multi_apply(
                self.target_single_image,
                gt_boxes,
                gt_landmarks,
                gt_labels,
            )
            # stack per-image results into batch tensors
            hm_gt = torch.stack(hm_gt, dim=0).detach()
            hm_mask = torch.stack(hm_mask, dim=0).detach()
            ltrb_gt = torch.stack(ltrb_gt, dim=0).detach()
            ltrb_weight = torch.stack(ltrb_weight, dim=0).detach()
            landm_gt = torch.stack(landm_gt, dim=0).detach()
            landm_mask = torch.stack(landm_mask, dim=0).detach()
            return hm_gt, hm_mask, ltrb_gt, ltrb_weight, landm_gt, landm_mask
    
    
    def target_single_image(self, gt_boxes, gt_landmarks, gt_labels):
        """Generate heatmap/box/landmark targets for a single image.

        Returns (hm_gt, hm_mask, ltrb_gt, ltrb_weight, landm_gt, landm_mask),
        all at feature-map resolution (image_size / down_ratio).
        """
        output_h = int(self.image_size[1]//self.down_ratio)
        output_w =  int(self.image_size[0]//self.down_ratio)
        hm_gt = gt_boxes.new_zeros((self.num_class, output_h, output_w))
        hm_mask = gt_boxes.new_ones((self.num_class, output_h, output_w))
        # scratch buffer holding the gaussian of the box being processed
        fake_hm = gt_boxes.new_zeros((output_h, output_w))
        landm_gt = gt_boxes.new_zeros((10, output_h, output_w))
        landm_mask = gt_boxes.new_zeros((10, output_h, output_w))
        ltrb_gt = gt_boxes.new_zeros((4, output_h, output_w))
        ltrb_weight = gt_boxes.new_zeros((1, output_h, output_w))

        # process boxes from largest to smallest log-area so smaller boxes
        # overwrite larger ones where their regions overlap
        boxes_areas_log = bbox_areas(gt_boxes).log()
        boxes_area_topk_log, boxes_ind = torch.topk(boxes_areas_log, boxes_areas_log.size(0))
        gt_boxes = gt_boxes[boxes_ind]
        gt_labels = gt_labels[boxes_ind]
        gt_landmarks = gt_landmarks[boxes_ind]

        # boxes in feature-map coordinates, clamped to the grid
        feat_gt_boxes = gt_boxes / self.down_ratio
        feat_gt_boxes[:, ::2] = torch.clamp(feat_gt_boxes[:, ::2], min=0, max=output_w - 1)
        feat_gt_boxes[:, 1::2] = torch.clamp(feat_gt_boxes[:, 1::2], min=0, max=output_h - 1)
        feat_hs, feat_ws = (feat_gt_boxes[:, 3] - feat_gt_boxes[:, 1],
                            feat_gt_boxes[:, 2] - feat_gt_boxes[:, 0])
        gt_wh = torch.stack([gt_boxes[:, 2] - gt_boxes[:, 0], gt_boxes[:, 3] - gt_boxes[:, 1]], dim=1)

        # integer box centers on the feature map, clamped in-bounds
        ct_ints = ((torch.stack([(gt_boxes[:, 0] + gt_boxes[:, 2]) / 2, (gt_boxes[:, 1] + gt_boxes[:, 3]) / 2], dim=1)) / self.down_ratio).int()
        ct_ints[:, 0:1] = torch.clamp(ct_ints[:, 0:1], min=0, max=output_w-1)
        ct_ints[:, 1:2] = torch.clamp(ct_ints[:, 1:2], min=0, max=output_h-1)

        # gaussian radii: half the feature-space extent, shrunk by alpha
        h_radiuses_alpha = (feat_hs / 2. * self.alpha).int()
        w_radiuses_alpha = (feat_ws / 2. * self.alpha).int()

        for k in range(boxes_ind.shape[0]):
            cls_id = gt_labels[k]
            fake_hm = fake_hm.zero_()
            w_r, h_r = w_radiuses_alpha[k].item(), h_radiuses_alpha[k].item()
            self.draw_truncate_gaussian(fake_hm, ct_ints[k], h_r, w_r)
            hm_gt[cls_id] = torch.max(hm_gt[cls_id], fake_hm)
            x, y = ct_ints[k]
            # reg = (gt_landmarks[k]-ct_ints[k].repeat(5)*self.down_ratio)/(gt_wh[k].repeat(5)+1e-8)
            
            # Landmark offsets from the box center (feature coords), with a
            # signed-log compression of large magnitudes:
            # |v| <= e -> v/e, v > e -> log(v), v < -e -> -log(|v|).
            # The decoder inverts this with exp (see CenterNetDecoder).
            reg = (gt_landmarks[k]/self.down_ratio-ct_ints[k].repeat(5))
            reg[reg.abs()<=np.e] /= np.e
            reg[reg>np.e] = reg[reg>np.e].log()
            reg[reg<-np.e] = -reg[reg<-np.e].abs().log()
            # landm_gt[:, y, x] = (gt_landmarks[k]/self.down_ratio-ct_ints[k].repeat(5))
            # negative raw coordinates mark invisible landmarks: zero target,
            # and the loss mask is only set where the landmark is visible
            reg[gt_landmarks[k]<0] = 0
            landm_gt[:, y, x] = reg
            landm_mask[:, y, x][gt_landmarks[k]>=0] = 1

            # ignore face
            # NOTE(review): y-h_r / x-w_r can go negative here, which makes
            # the slice wrap from the far side instead of clamping at 0 —
            # verify against boxes near the top/left border.
            if not (gt_landmarks[k]>0).any(): # no visible landmark
                hm_mask[cls_id][y-h_r:y+h_r, x-w_r:x+w_r] = 0
                continue
            if max(feat_hs[k].item(), feat_ws[k].item())*self.down_ratio<self.min_face:
                hm_mask[cls_id][y-h_r:y+h_r, x-w_r:x+w_r] = 0
                # landm_mask[:, y, x] = 0
                continue
            
            # Box regression targets everywhere this box's gaussian is
            # positive; each pixel weighted by its gaussian value, rescaled
            # so the weights over the box sum to log(box area).
            ltrb_gt_inds = fake_hm > 0
            ltrb_gt[:, ltrb_gt_inds] = gt_boxes[k][:, None]
            local_hm = fake_hm[ltrb_gt_inds]
            ct_div = local_hm.sum()
            local_hm *= boxes_area_topk_log[k]
            ltrb_weight[0, ltrb_gt_inds] = local_hm / ct_div


        # (debug visualisation, kept for reference)
        # import matplotlib.pyplot as plt
        # fig = plt.figure()
        # ax = fig.add_subplot(1, 1, 1)
        # hm = np.zeros((800, 800)).astype(np.float32)
        # ax.imshow(hm)
        # cnt = 0
        # for box, landm in zip(gt_boxes.cpu().numpy(), (gt_landmarks).cpu().numpy()):
        #     x1, y1, x2, y2 = box 
        #     rect = plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor='r', linewidth=1)
        #     ax.add_patch(rect)
        #     ax.text(x1-5, y1-5, '%d'%cnt, color='r', fontsize=5)
        #     landm = landm.reshape(5,2)
        #     for x,y in landm:
        #         circle = plt.Circle((x,y), 1 , color='r', fill=False)
        #         ax.add_patch(circle)
        #     ax.text(landm[0][0]-5, landm[0][1]-5, '%d'%cnt, color='r', fontsize=5)
        #     cnt+=1
        # plt.draw()
        # plt.show()

        return hm_gt, hm_mask, ltrb_gt, ltrb_weight, landm_gt, landm_mask


    def gaussian_2d(self, shape, sigma_x=1, sigma_y=1):
        """Anisotropic 2-D gaussian of the given (h, w) shape, peak value 1."""
        m, n = [(ss - 1.) / 2. for ss in shape]
        y, x = np.ogrid[-m:m + 1, -n:n + 1]

        h = np.exp(-(x * x / (2 * sigma_x * sigma_x) + y * y / (2 * sigma_y * sigma_y)))
        # zero out numerically negligible tails
        h[h < np.finfo(h.dtype).eps * h.max()] = 0
        return h
    
    def draw_truncate_gaussian(self, heatmap, center, h_radius, w_radius, k=1):
        """Element-wise-max a gaussian onto `heatmap` at `center`, truncated
        at the heatmap borders. Modifies `heatmap` in place and returns it.
        """
        h, w = 2 * h_radius + 1, 2 * w_radius + 1
        sigma_x = w / 6
        sigma_y = h / 6
        gaussian = self.gaussian_2d((h, w), sigma_x=sigma_x, sigma_y=sigma_y)
        gaussian = heatmap.new_tensor(gaussian)

        x, y = int(center[0]), int(center[1])

        height, width = heatmap.shape[0:2]

        # clip the gaussian window to the heatmap bounds
        left, right = min(x, w_radius), min(width - x, w_radius + 1)
        top, bottom = min(y, h_radius), min(height - y, h_radius + 1)

        masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
        masked_gaussian = gaussian[h_radius - top:h_radius + bottom,
                          w_radius - left:w_radius + right]
        if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
            # write through the slice view so `heatmap` itself is updated
            torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
        return heatmap
    

    
