from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


# from data import cfg_mnet
# GPU = cfg_mnet['gpu_train']


def gaussian_radius(det_size, min_overlap=0.7):
    """Largest Gaussian radius such that a box corner jittered by that much
    still yields IoU >= min_overlap with a (height, width) box.

    Solves the three CornerNet quadratic cases and returns the smallest root.
    The (b + sqrt(disc)) / 2 root form is kept verbatim from the CornerNet
    reference implementation to stay numerically compatible with its targets.
    """
    height, width = det_size
    # accept scalar tensors as well as plain numbers
    if isinstance(height, torch.Tensor):
        height = height.item()
        width = width.item()

    # case 1: both corners move toward the box interior (a = 1)
    b1 = height + width
    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
    r1 = (b1 + np.sqrt(b1 * b1 - 4 * c1)) / 2

    # case 2: the jittered box shrinks on both sides (a = 4)
    b2 = 2 * (height + width)
    c2 = (1 - min_overlap) * width * height
    r2 = (b2 + np.sqrt(b2 * b2 - 4 * 4 * c2)) / 2

    # case 3: the jittered box grows on both sides (a = 4 * overlap)
    a3 = 4 * min_overlap
    b3 = -2 * min_overlap * (height + width)
    c3 = (min_overlap - 1) * width * height
    r3 = (b3 + np.sqrt(b3 * b3 - 4 * a3 * c3)) / 2

    return min(r1, r2, r3)


def gaussian2D(shape, sigma=1):
    """Return a float32 torch tensor of the given 2-D `shape` holding an
    unnormalized Gaussian with peak value 1 at the center."""
    rows, cols = shape
    cy, cx = (rows - 1.) / 2., (cols - 1.) / 2.
    ys, xs = np.ogrid[-cy:cy + 1, -cx:cx + 1]

    g = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    # zero out numerically negligible tails
    g[g < np.finfo(g.dtype).eps * g.max()] = 0
    return torch.from_numpy(np.float32(g))


def draw_umich_gaussian(heatmap, center, radius, k=1):
    """Render a Gaussian peak of the given `radius` onto `heatmap` at
    `center` = (x, y), keeping the element-wise maximum with the existing
    values. Mutates `heatmap` in place and returns it.

    `k` is kept for interface compatibility; the current max-merge path does
    not use it (only the commented-out np.maximum variant did).
    """
    diameter = 2 * radius + 1
    kernel = gaussian2D((diameter, diameter), sigma=diameter / 6).to(heatmap.device)

    cx, cy = int(center[0]), int(center[1])
    rows, cols = heatmap.shape[0:2]

    # extents of the kernel window, clipped at the map border
    left, right = min(cx, radius), min(cols - cx, radius + 1)
    top, bottom = min(cy, radius), min(rows - cy, radius + 1)

    roi = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    kernel_roi = kernel[radius - top:radius + bottom, radius - left:radius + right]
    if min(kernel_roi.shape) > 0 and min(roi.shape) > 0:  # TODO debug
        brighter = kernel_roi >= roi
        roi[brighter] = kernel_roi[brighter]
        # np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
    return heatmap


def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
    """Paint dense regression targets `value` into `regmap` around `center`,
    wherever the local Gaussian beats the current `heatmap` value.

    Arguments:
      regmap:  (dim x H x W) regression target map, updated and returned.
      heatmap: (H x W) existing heatmap; only read, for the >= comparison.
      center:  (x, y) object center.
      value:   (dim,) tensor of regression values to splat.
      radius:  Gaussian radius.
      is_offset: when True and dim == 2, store per-pixel offsets to the
        center instead of a constant value.

    Fixes vs. the original:
      * the `delta` subtraction now only runs under `is_offset and dim == 2`
        (it previously ran unconditionally and raised NameError when
        is_offset was False);
      * the blended regmap is only computed when the clipped window is
        non-empty (`idx` was previously used outside its guard);
      * the boolean mask is cast to float before arithmetic — modern torch
        rejects `1 - bool_tensor`.
    """
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
    dim = value.shape[0]
    reg = torch.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=torch.float32) * \
          value.reshape(-1, 1, 1)
    if is_offset and dim == 2:
        delta = torch.arange(diameter * 2 + 1) - radius
        reg[0] = reg[0] - delta.reshape(1, -1)
        reg[1] = reg[1] - delta.reshape(-1, 1)

    x, y = int(center[0]), int(center[1])

    height, width = heatmap.shape[0:2]

    # window extents clipped at the map border
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)

    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
    masked_reg = reg[:, radius - top:radius + bottom, radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:  # TODO debug
        win_h, win_w = masked_gaussian.shape[0], masked_gaussian.shape[1]
        # keep the new value wherever the Gaussian dominates the heatmap
        idx = (masked_gaussian >= masked_heatmap).reshape(1, win_h, win_w).float()
        masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg
    regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
    return regmap


def draw_msra_gaussian(heatmap, center, sigma):
    """Draw an MSRA-style Gaussian (half-width 3 * sigma) onto `heatmap` at
    `center` = (x, y), keeping the element-wise maximum with the existing
    values. Mutates `heatmap` in place and returns it.
    """
    radius = sigma * 3
    mu_x = int(center[0] + 0.5)
    mu_y = int(center[1] + 0.5)
    rows, cols = heatmap.shape[0], heatmap.shape[1]
    # top-left / bottom-right corners of the unclipped Gaussian patch
    tl = [int(mu_x - radius), int(mu_y - radius)]
    br = [int(mu_x + radius + 1), int(mu_y + radius + 1)]
    # patch entirely outside the map: nothing to draw
    if tl[0] >= cols or tl[1] >= rows or br[0] < 0 or br[1] < 0:
        return heatmap
    size = 2 * radius + 1
    col_coords = torch.arange(0, size, 1).type(torch.float32)
    row_coords = col_coords.reshape((-1, 1))
    mu = size // 2
    patch = torch.exp(- ((col_coords - mu) ** 2 + (row_coords - mu) ** 2) / (2 * sigma ** 2))
    # clip both the patch and the destination window to the map bounds
    gx = max(0, -tl[0]), min(br[0], cols) - tl[0]
    gy = max(0, -tl[1]), min(br[1], rows) - tl[1]
    hx = max(0, tl[0]), min(br[0], cols)
    hy = max(0, tl[1]), min(br[1], rows)
    window = heatmap[hy[0]:hy[1], hx[0]:hx[1]]
    clipped = patch[gy[0]:gy[1], gx[0]:gx[1]]
    keep = clipped >= window
    window[keep] = clipped[keep]

    return heatmap


def _sigmoid(x):
    y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)
    return y


def _gather_feat(feat, ind, mask=None):
    dim = feat.size(2)
    ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
    # print("---------------------------------------------")
    # print("feat = {}".format(feat))
    # print("feat.shape = {}".format(feat.shape))
    # print("ind = {}".format(ind))
    # print("ind.shape = {}".format(ind.shape))
    # print("---------------------------------------------")
    try:
        feat = feat.gather(1, ind)
        # print("*************************************")
    except:
        print("error --> feat.shape = {}".format(feat.shape))
        print("error --> ind.shape = {}".format(ind.shape))
        print("feat = {}".format(feat))
        print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
    if mask is not None:
        mask = mask.unsqueeze(2).expand_as(feat)
        feat = feat[mask]
        feat = feat.view(-1, dim)
    return feat


def _transpose_and_gather_feat(feat, ind):
    """Move channels last, flatten the spatial grid, then gather rows at `ind`.

    feat: (batch x dim x h x w)  ->  returns (batch x max_objects x dim).
    """
    batch, dim = feat.size(0), feat.size(1)
    feat = feat.permute(0, 2, 3, 1).contiguous().view(batch, -1, dim)
    return _gather_feat(feat, ind)


def _slow_neg_loss(pred, gt):
    """focal loss from CornerNet"""
    pos_inds = gt.eq(1)
    neg_inds = gt.lt(1)

    neg_weights = torch.pow(1 - gt[neg_inds], 4)

    loss = 0
    pos_pred = pred[pos_inds]
    neg_pred = pred[neg_inds]

    pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
    neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights

    num_pos = pos_inds.float().sum()
    pos_loss = pos_loss.sum()
    neg_loss = neg_loss.sum()

    if pos_pred.nelement() == 0:
        loss = loss - neg_loss
    else:
        loss = loss - (pos_loss + neg_loss) / num_pos

    return loss


def _neg_loss(pred, gt):
    """Modified focal loss. Exactly the same as CornerNet.
      Runs faster and costs a little bit more memory
    Arguments:
      pred (batch x c x h x w)
      gt_regr (batch x c x h x w)
    """
    pos_inds = gt.eq(1).float()
    neg_inds = gt.lt(1).float()

    neg_weights = torch.pow(1 - gt, 4)

    loss = 0

    pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
    neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds

    num_pos = pos_inds.float().sum()
    pos_loss = pos_loss.sum()
    neg_loss = neg_loss.sum()

    if num_pos == 0:
        loss = loss - neg_loss
    else:
        loss = loss - (pos_loss + neg_loss) / num_pos
    return loss


def _not_faster_neg_loss(pred, gt):
    pos_inds = gt.eq(1).float()
    neg_inds = gt.lt(1).float()
    num_pos = pos_inds.float().sum()
    neg_weights = torch.pow(1 - gt, 4)

    loss = 0
    trans_pred = pred * neg_inds + (1 - pred) * pos_inds
    weight = neg_weights * neg_inds + pos_inds
    all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight
    all_loss = all_loss.sum()

    if num_pos > 0:
        all_loss /= num_pos
    loss -= all_loss
    return loss


def _slow_reg_loss(regr, gt_regr, mask):
    num = mask.float().sum()
    mask = mask.unsqueeze(2).expand_as(gt_regr)

    regr = regr[mask]
    gt_regr = gt_regr[mask]

    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
    regr_loss = regr_loss / (num + 1e-4)
    return regr_loss


def _reg_loss(regr, gt_regr, mask):
    """
    L1 regression loss
    Arguments:
      regr (batch x max_objects x dim)
      gt_regr (batch x max_objects x dim)
      mask (batch x max_objects)
    """
    num = mask.float().sum()
    mask = mask.unsqueeze(2).expand_as(gt_regr).float()

    regr = regr * mask
    gt_regr = gt_regr * mask

    regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
    regr_loss = regr_loss / (num + 1e-4)
    return regr_loss


class FocalLoss(nn.Module):
    """Thin nn.Module wrapper around the modified focal loss so it can be
    used like any other criterion."""

    def __init__(self):
        super(FocalLoss, self).__init__()
        # kept as an attribute so the implementation can be swapped if needed
        self.neg_loss = _neg_loss

    def forward(self, out, target):
        loss = self.neg_loss(out, target)
        return loss


class RegLoss(nn.Module):
    """Smooth-L1 regression loss gathered at per-object spatial indices.

    forward arguments:
        output (batch x dim x h x w): raw network map
        mask (batch x max_objects): validity mask
        ind (batch x max_objects): flat spatial indices of object centers
        target (batch x max_objects x dim): regression targets
    """

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, output, mask, ind, target):
        gathered = _transpose_and_gather_feat(output, ind)
        return _reg_loss(gathered, target, mask)


class RegL1Loss(nn.Module):
    """Masked L1 regression loss gathered at per-object spatial indices.

    forward arguments:
        output (batch x dim x h x w): raw network map
        mask (batch x max_objects): validity mask
        ind (batch x max_objects): flat spatial indices of object centers
        target (batch x max_objects x dim): regression targets
    """

    def __init__(self):
        super(RegL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # Sum all element losses, then normalize by the number of supervised
        # entries; reduction='sum' replaces the deprecated size_average=False.
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        return loss


class NormRegL1Loss(nn.Module):
    """Target-normalized masked L1 loss: predictions are divided by the
    targets first, so the loss is measured relative to target magnitude
    (the effective target becomes all-ones).

    forward arguments mirror RegL1Loss:
        output (batch x dim x h x w), mask (batch x max_objects),
        ind (batch x max_objects), target (batch x max_objects x dim)
    """

    def __init__(self):
        super(NormRegL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # normalize by the target value (epsilon guards zero targets)
        pred = pred / (target + 1e-4)
        target = target * 0 + 1
        # reduction='sum' replaces the deprecated size_average=False
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        return loss


class RegWeightedL1Loss(nn.Module):
    """Masked L1 loss where the mask already matches the target layout
    (batch x max_objects x dim), e.g. the per-coordinate keypoint mask —
    so no unsqueeze/expand is needed before weighting.

    forward arguments:
        output (batch x dim x h x w), mask (batch x max_objects x dim),
        ind (batch x max_objects), target (batch x max_objects x dim)
    """

    def __init__(self):
        super(RegWeightedL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.float()
        # reduction='sum' replaces the deprecated size_average=False
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        return loss


class L1Loss(nn.Module):
    """Mean-reduced masked L1 loss gathered at per-object spatial indices.

    forward arguments:
        output (batch x dim x h x w), mask (batch x max_objects),
        ind (batch x max_objects), target (batch x max_objects x dim)
    """

    def __init__(self):
        super(L1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # reduction='mean' replaces the legacy 'elementwise_mean' spelling,
        # which modern torch no longer accepts
        loss = F.l1_loss(pred * mask, target * mask, reduction='mean')
        return loss


# 0: xyxy
# 1: xywh
def get_batch_targets(annotations, num_classes, num_joints, dsize, mode = 0):
    """Build CenterNet/CenterFace-style training targets for a batch.

    Arguments:
      annotations: list (one tensor per image) of shape
        (num_objs, 4 + 2 * num_joints + 1); each row holds 4 bbox values
        (xyxy when mode == 0, xywh when mode == 1, normalized to [0, 1]),
        then num_joints (x, y) landmark pairs, then a flag — rows whose last
        value is negative are skipped.
      num_classes: number of center-heatmap channels.
      num_joints: landmarks per object.
      dsize: (output_h, output_w) of the target maps.
      mode: bbox encoding selector (0: xyxy, 1: xywh).

    Returns the 11-tuple (hm, hm_hp, wh, kps, reg, ind, reg_mask, kps_mask,
    hp_offset, hp_ind, hp_mask) consumed by CenterFaceLoss.forward.
    """
    # NOTE(review): the per-image object cap is taken from the smaller map
    # side — looks like a pragmatic bound rather than a derived one; confirm.
    max_objs = min(dsize)
    output_h, output_w = dsize
    batch_size = len(annotations)
    device = annotations[0].device

    # center heatmap and landmark heatmap
    hm = torch.zeros((batch_size, num_classes, output_h, output_w), dtype=torch.float32).to(device)
    hm_hp = torch.zeros((batch_size, num_joints, output_h, output_w), dtype=torch.float32).to(device)
    # per-object box size, landmark offsets (relative to center), center sub-pixel offset
    wh = torch.zeros((batch_size, max_objs, 2), dtype=torch.float32).to(device)
    kps = torch.zeros((batch_size, max_objs, num_joints * 2), dtype=torch.float32).to(device)
    reg = torch.zeros((batch_size, max_objs, 2), dtype=torch.float32).to(device)
    # flattened spatial index of each object center, plus validity masks
    ind = torch.zeros((batch_size, max_objs), dtype=torch.int64).to(device)
    reg_mask = torch.zeros((batch_size, max_objs), dtype=torch.uint8).to(device)
    kps_mask = torch.zeros((batch_size, max_objs, num_joints * 2), dtype=torch.uint8).to(device)
    # per-landmark sub-pixel offset, flattened index, and validity mask
    hp_offset = torch.zeros((batch_size, max_objs * num_joints, 2), dtype=torch.float32).to(device)
    hp_ind = torch.zeros((batch_size, max_objs * num_joints), dtype=torch.int64).to(device)
    hp_mask = torch.zeros((batch_size, max_objs * num_joints), dtype=torch.int64).to(device)

    for batch_i in range(batch_size):
        annotation = annotations[batch_i]
        # gt_det = []
        num_objs = annotation.shape[0]
        for k in range(min(max_objs, num_objs)):
            # a negative flag in the last column marks an ignored row
            if annotation[k][-1] < 0:
                continue

            bbox = annotation[k][:4]
            # single-class setup: every object lands in heatmap channel 0
            cls_id = 0

            if mode:
                h, w = bbox[2:4]
            else:
                h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
            # scale normalized size up to feature-map pixels
            h *= output_h
            w *= output_w

            if h > 0 and w > 0:
                # bounding box
                radius = gaussian_radius((h, w))
                radius = max(0, int(radius))
                center_x, center_y = bbox[0] * output_w + w / 2, bbox[1] * output_h + h / 2
                ct = torch.zeros((2), dtype=torch.float32).to(device)
                ct[0], ct[1] = center_x, center_y
                ct_int = ct.int()
                draw_umich_gaussian(hm[batch_i][int(cls_id)], ct_int, radius)
                wh[batch_i][k] = torch.Tensor([1. * w, 1. * h]).to(device)
                # flat row-major index of the integer center cell
                ind[batch_i][k] = int(ct_int[1] * output_w + ct_int[0])
                # fractional part lost by the int cast, recovered by the offset head
                reg[batch_i][k] = ct - ct_int
                reg_mask[batch_i][k] = 1
                # keypoint
                hp_radius = radius  # gaussian_radius((h, w))
                hp_radius = max(0, int(hp_radius))
                pts = annotation[k][4:-1].reshape(-1, 2) * torch.FloatTensor([output_w, output_h]).to(device)
                for j in range(num_joints):

                    # only landmarks that fall inside the output map are supervised
                    if pts[j, 0] >= 0 and pts[j, 0] < output_w and pts[j, 1] >= 0 and pts[j, 1] < output_h:
                        pt_int = pts[j, :2].int()
                        # landmark position relative to the (integer) object center
                        kps[batch_i][k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
                        hp_offset[batch_i][k * num_joints + j] = pts[j, :2] - pt_int
                        hp_ind[batch_i][k * num_joints + j] = pt_int[1] * output_w + pt_int[0]
                        hp_mask[batch_i][k * num_joints + j] = 1
                        kps_mask[batch_i][k, j * 2: j * 2 + 2] = 1

                        draw_umich_gaussian(hm_hp[batch_i][j], pt_int, hp_radius)

    return hm, hm_hp, wh, kps, reg, ind, reg_mask, kps_mask, hp_offset, hp_ind, hp_mask


class CenterFaceLoss(nn.Module):
    """Total CenterFace training loss.

    Combines:
      * focal loss on the face-center heatmap,
      * L1 loss on box size (weighted 0.1) and center sub-pixel offset,
      * weighted L1 on landmark offsets, focal loss on the landmark heatmap,
        and L1 on landmark sub-pixel offsets.
    """

    def __init__(self, num_classes=1, num_joints=5):
        super(CenterFaceLoss, self).__init__()
        self.num_classes = num_classes
        self.num_joints = num_joints
        self.crit = FocalLoss()  # object center
        self.crit_hm_hp = FocalLoss()  # keypoint center
        self.crit_kp = RegWeightedL1Loss()  # keypoint-offset
        self.crit_reg = RegL1Loss()  # wh wh-offset

    def forward(self, predictions, targets, dsize):
        """Return the (center, box, landmark) loss terms.

        predictions: tuple of raw network maps, in the order
          (hm, wh, kps, wh_offset, hm_hp, hm_hp_offset); heatmaps are logits
          (sigmoid is applied here, and note that _sigmoid is in-place on
          the prediction tensors).
        targets: list of per-image annotation tensors (see get_batch_targets).
        dsize: (output_h, output_w) of the prediction maps.
        """
        hm_, wh_, kps_, wh_o, hm_hp_, hm_hp_o = predictions
        # test data for heatmap, not for reg
        # hm_, hm_hp_, wh_, _, _, _, hm_hp_, _, _ = predictions # for func testing
        hm, hm_hp, wh, kps, reg, ind, reg_mask, kps_mask, hp_offset, hp_ind, hp_mask \
            = get_batch_targets(targets, self.num_classes, self.num_joints, dsize)

        # center heatmap: focal loss on clamped sigmoid scores
        loss_c = self.crit(_sigmoid(hm_), hm)
        # box branch: size term (down-weighted) + center offset term
        loss_b = self.crit_reg(wh_, reg_mask, ind, wh) * 0.1 + self.crit_reg(wh_o, reg_mask, ind, reg)
        # landmark branch: offsets from center + landmark heatmap + sub-pixel offsets
        loss_l = self.crit_kp(kps_, kps_mask, ind, kps) + self.crit_hm_hp(_sigmoid(hm_hp_), hm_hp) \
                 + self.crit_reg(hm_hp_o, hp_mask, hp_ind, hp_offset)

        return loss_c, loss_b, loss_l


if __name__ == "__main__":
    # Smoke test. NOTE(review): the rows below look like xywh + 5 landmarks +
    # flag, while get_batch_targets defaults to xyxy (mode=0) — confirm the
    # intended encoding against the dataset loader.
    ann = np.array([[209.0, 322.0, 201.0, 182.0, 269.071, 414.263, 324.661, 361.201, 333.504, 405.42, 323.397, 468.589,
                     378.987, 420.58, 1.0],
                    [209.0, 322.0, 201.0, 182.0, 269.071, 414.263, 324.661, 361.201, 333.504, 405.42, 323.397, 468.589,
                     378.987, 420.58, 1.0]],
                   dtype=np.float32)

    annos = [torch.from_numpy(ann) / 512, torch.from_numpy(ann) / 512]
    dsize = (128, 128)

    # Build dummy network outputs with the shapes CenterFaceLoss.forward
    # expects: (hm, wh, kps, wh_offset, hm_hp, hm_hp_offset). The original
    # smoke test passed get_batch_targets' 11-tuple straight in (forward
    # unpacks 6 names) and omitted the required dsize argument, so it crashed.
    batch = len(annos)
    out_h, out_w = dsize
    predictions = (
        torch.rand(batch, 1, out_h, out_w),   # hm: face-center heatmap logits
        torch.rand(batch, 2, out_h, out_w),   # wh: box width/height
        torch.rand(batch, 10, out_h, out_w),  # kps: 5 landmark (x, y) offsets
        torch.rand(batch, 2, out_h, out_w),   # wh_o: center sub-pixel offset
        torch.rand(batch, 5, out_h, out_w),   # hm_hp: landmark heatmap logits
        torch.rand(batch, 2, out_h, out_w),   # hm_hp_o: landmark sub-pixel offset
    )

    criterion = CenterFaceLoss()
    loss_c, loss_b, loss_l = criterion(predictions, annos, dsize)
    print("center loss = {}, box loss = {}, landmark loss = {}".format(loss_c, loss_b, loss_l))