"""
paper ref:
    Li, B., Liu, Y., & Wang, X. (2019). Gradient harmonized single-stage detector.
    Paper presented at the Proceedings of the AAAI Conference on Artificial Intelligence.
link:
    https://www.aaai.org/ojs/index.php/AAAI/article/view/4877/4750

code ref:
    https://github.com/DHPO/GHM_Loss.pytorch/blob/master/GHM_loss.py
    https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/ghm_loss.py
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


def _expand_binary_labels(labels, label_channels):
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    return bin_labels


class GradientHarmonizedLoss(nn.Module):
    """Base class for gradient harmonized losses (GHM-C / GHM-R).

    Re-weights each example by the inverse population of its gradient-norm
    histogram bin, so regions dense in easy (or outlier-hard) examples are
    down-weighted (Li et al., AAAI 2019).

    Args:
        bins (int): number of histogram bins over the gradient norm [0, 1].
        momentum (float): EMA factor for the bin counts; 0 disables the
            moving average and uses the per-batch histogram directly.
    """

    def __init__(self, bins, momentum=0.0):
        super(GradientHarmonizedLoss, self).__init__()
        self.bins = bins
        self.momentum = momentum
        if momentum > 0:
            # Running (EMA) histogram of gradient norms, one slot per bin.
            self.register_buffer('acc_sum', torch.zeros(bins))

    def _custom_loss(self, output, target, weight, reduction):
        """Return the example-weighted loss; implemented by subclasses."""
        raise NotImplementedError

    def _custom_loss_grad(self, output, target):
        """Return the per-example gradient norm in [0, 1]; implemented by subclasses."""
        raise NotImplementedError

    def forward(self, output, target):
        bins, mmt = self.bins, self.momentum

        # Statistics: histogram of the per-example gradient norm over [0, 1].
        g = self._custom_loss_grad(output, target)
        g_hist = torch.histc(g, bins=bins, min=0, max=1)
        if mmt > 0:  # exponential moving average of the bin counts
            self.acc_sum = mmt * self.acc_sum + (1 - mmt) * g_hist
            g_hist = self.acc_sum
        # Bin index per example. The clamp is required: g == 1.0 (e.g. a
        # saturated sigmoid vs. an opposite target) would map to index
        # `bins`, one past the last bin — torch.histc itself counts the
        # max value into the last bin, so fold it into bins - 1.
        g_index = (g * bins).long().clamp_(0, bins - 1)
        valid_mask = g_hist > 0

        # Per-bin weight is the reciprocal of the bin population.
        g_weight = torch.zeros_like(g_hist, dtype=torch.float)
        g_weight[valid_mask] = g_hist[valid_mask].reciprocal()

        # Map bin weights back onto examples, then normalize by the
        # number of non-empty bins.
        g_weight = g_weight[g_index]
        g_weight = g_weight / valid_mask.sum()

        # Delegate the actual loss computation to the subclass.
        return self._custom_loss(output, target.float(), g_weight, reduction='sum')


class GHMCLoss(GradientHarmonizedLoss):
    """GHM classification loss: gradient-harmonized BCE-with-logits.

    Args:
        bins (int): number of gradient-norm histogram bins.
        momentum (float): EMA factor for the bin counts (0 disables).
    """

    def __init__(self, bins=10, momentum=0.0):
        super(GHMCLoss, self).__init__(bins, momentum)

    def _custom_loss(self, output, target, weight, reduction):
        # Honor the `reduction` argument instead of hard-coding 'sum'
        # (the base class passes 'sum', so default behavior is unchanged).
        return F.binary_cross_entropy_with_logits(output, target, weight, reduction=reduction)

    def _custom_loss_grad(self, output, target):
        # |p - t| is the gradient norm of BCE w.r.t. the logits, in [0, 1].
        return torch.abs(output.sigmoid().detach() - target)

    def forward(self, output, target):
        if output.dim() != target.dim():  # convert class-index target to one-hot/binary
            target = _expand_binary_labels(target, output.shape[-1])
        return super(GHMCLoss, self).forward(output, target)


class GHMRLoss(GradientHarmonizedLoss):
    """GHM regression loss: gradient-harmonized Authentic Smooth L1 (ASL1).

    Args:
        bins (int): number of gradient-norm histogram bins.
        mu (float): transition width of the ASL1 loss.
        momentum (float): EMA factor for the bin counts (0 disables).
    """

    def __init__(self, bins=10, mu=0.02, momentum=0.0):
        super(GHMRLoss, self).__init__(bins, momentum)
        self.mu = mu

    def _custom_loss(self, output, target, weight, reduction):
        diff = output - target
        # ASL1: sqrt(d^2 + mu^2) - mu; smooth everywhere, ~|d| for large d.
        loss_r = torch.sqrt(diff * diff + self.mu * self.mu) - self.mu
        weighted = loss_r * weight
        # Honor the `reduction` argument instead of always summing
        # (the base class passes 'sum', so default behavior is unchanged).
        if reduction == 'mean':
            return weighted.mean()
        if reduction == 'none':
            return weighted
        return weighted.sum()

    def _custom_loss_grad(self, output, target):
        # d / sqrt(d^2 + mu^2) is the gradient norm of ASL1, always in [0, 1).
        diff = output - target
        return torch.abs(diff / torch.sqrt(self.mu * self.mu + diff * diff)).detach()


# if __name__ == '__main__':
#     torch.manual_seed(2020)

#     # test ghmc
#     ghmc = GHMCLoss(momentum=0.6)
#     x = torch.randn((15, 5), requires_grad=True)
#     t = torch.randint(0, 5, (15,))

#     loss = ghmc(x, t)
#     loss.backward()
#     print(loss)

#     # test ghmr
#     ghmr = GHMRLoss(mu=0.2, momentum=0)
#     x = torch.randn((15, 1), requires_grad=True)
#     t = torch.randn((15, 1))

#     loss = ghmr(x, t)
#     loss.backward()
#     print(loss)
