import torch
import torch.nn as nn
import torch.nn.functional as F


def _expand_binary_labels(labels, label_channels):
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    return bin_labels


class GHMCLoss(nn.Module):
    """GHM classification loss (Li et al., "Gradient Harmonized Single-stage
    Detector", AAAI 2019).

    Weights each example by the reciprocal of the population of its
    gradient-norm histogram bin, so that very easy examples (and extreme
    outliers) which crowd a few bins contribute less to the loss.

    Args:
        bins (int): number of histogram bins covering gradient norms in [0, 1].
        momentum (float): EMA factor for the bin counts across batches;
            0 disables the moving average.
    """

    def __init__(self, bins=10, momentum=0.0):
        super(GHMCLoss, self).__init__()
        self.bins = bins
        self.momentum = momentum
        if momentum > 0:
            # running (EMA) histogram of gradient norms, persisted as a buffer
            self.register_buffer('acc_sum', torch.zeros(bins))

    def forward(self, output, target):
        """Compute the GHM-C loss.

        Args:
            output: raw logits of shape (N, C).
            target: class indices of shape (N,) or a binary/one-hot tensor
                shaped like `output`.

        Returns:
            Scalar loss tensor (weighted BCE-with-logits, summed).
        """
        bins, mmt = self.bins, self.momentum
        if output.dim() != target.dim():  # convert class indices to one-hot
            target = torch.zeros_like(output).scatter_(1, target.unsqueeze(1), 1)

        # Gradient norm of sigmoid-BCE w.r.t. the logits: |p - t|, in [0, 1].
        g = torch.abs(output.sigmoid().detach() - target)
        g_hist = torch.histc(g, bins=bins, min=0, max=1)
        if mmt > 0:  # exponential moving average of the histogram
            self.acc_sum = mmt * self.acc_sum + (1 - mmt) * g_hist
            g_hist = self.acc_sum

        # Per-example bin index. Clamp to bins - 1: g reaches exactly 1.0
        # when the sigmoid saturates, and (1.0 * bins).long() == bins would
        # index out of range.
        g_index = (g * bins).long().clamp_(max=bins - 1)
        valid_mask = g_hist > 0

        # Per-bin weight: reciprocal of the (EMA) bin population.
        g_weight = torch.zeros_like(g_hist, dtype=torch.float)
        g_weight[valid_mask] = g_hist[valid_mask].reciprocal()

        # Map per-bin weights back to examples, then normalize by the
        # number of non-empty bins.
        g_weight = g_weight[g_index]
        g_weight = g_weight / valid_mask.sum()

        # calculate the loss
        loss = F.binary_cross_entropy_with_logits(output, target.float(), g_weight, reduction='sum')
        return loss


class GHMRLoss(nn.Module):
    """GHM regression loss (Li et al., "Gradient Harmonized Single-stage
    Detector", AAAI 2019), built on the Authentic Smooth L1 loss
    ASL1(d) = sqrt(d^2 + mu^2) - mu.

    Examples are down-weighted by the population of their gradient-norm
    histogram bin, harmonizing the contribution of easy examples and outliers.

    Args:
        bins (int): number of histogram bins covering gradient norms in [0, 1].
        mu (float): ASL1 smoothing parameter.
        momentum (float): EMA factor for the bin counts across batches;
            0 disables the moving average.
    """

    def __init__(self, bins=10, mu=0.02, momentum=0.0):
        super(GHMRLoss, self).__init__()
        self.bins = bins
        self.mu = mu
        self.momentum = momentum
        if momentum > 0:
            # running (EMA) histogram of gradient norms, persisted as a buffer
            self.register_buffer('acc_sum', torch.zeros(bins))

    def forward(self, output, target):
        """Compute the GHM-R loss.

        Args:
            output: predicted regression values, same shape as `target`.
            target: regression targets.

        Returns:
            Scalar loss tensor (weighted ASL1, summed).
        """
        bins, mu, mmt = self.bins, self.mu, self.momentum

        # Authentic Smooth L1: sqrt(d^2 + mu^2) - mu.
        diff = output - target
        loss = torch.sqrt(diff * diff + mu * mu) - mu

        # Gradient norm of ASL1 w.r.t. the prediction: |d| / sqrt(d^2 + mu^2),
        # strictly below 1 in exact arithmetic but rounds to 1.0 in float for
        # large |d|.
        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
        g_hist = torch.histc(g, bins=bins, min=0, max=1)
        if mmt > 0:  # exponential moving average of the histogram
            self.acc_sum = mmt * self.acc_sum + (1 - mmt) * g_hist
            g_hist = self.acc_sum

        # Per-example bin index, clamped so g == 1.0 maps to the last bin
        # instead of indexing out of range.
        g_index = (g * bins).long().clamp_(max=bins - 1)
        valid_mask = g_hist > 0

        # Per-bin weight: reciprocal of the (EMA) bin population.
        g_weight = torch.zeros_like(g_hist, dtype=torch.float)
        g_weight[valid_mask] = g_hist[valid_mask].reciprocal()

        # Map per-bin weights back to examples, then normalize by the
        # number of non-empty bins.
        g_weight = g_weight[g_index]
        g_weight = g_weight / valid_mask.sum()
        return (loss * g_weight).sum()


def _demo():
    """Smoke-test both losses: forward pass, backward pass, print result."""
    torch.manual_seed(2020)

    # GHM-R on a small random regression problem.
    ghmr = GHMRLoss(mu=0.2, momentum=0.3)
    pred = torch.randn((15, 1), requires_grad=True)
    gt = torch.randn((15, 1))
    r_loss = ghmr(pred, gt)
    r_loss.backward()
    print(r_loss)

    # GHM-C on a random 5-class classification problem.
    ghmc = GHMCLoss(momentum=0.3)
    logits = torch.randn((15, 5), requires_grad=True)
    labels = torch.randint(0, 5, (15,))
    c_loss = ghmc(logits, labels)
    c_loss.backward()
    print(c_loss)


if __name__ == '__main__':
    _demo()
