import torch
from torch import nn
from torch.nn import  functional as F
import numpy as np


class OhemCELoss(nn.Module):
    """Online Hard Example Mining (OHEM) cross-entropy loss.

    Keeps only the per-pixel losses above ``-log(thresh)``; if fewer than
    ``numel // factor`` labeled pixels qualify, falls back to the hardest
    ``numel // factor`` pixels so a minimum fraction always contributes.

    Args:
        thresh: probability threshold in (0, 1); a pixel whose CE loss
            exceeds ``-log(thresh)`` counts as "hard".
        lb_ignore: label value excluded from the loss.
        factor: keep at least ``1/factor`` of the labeled pixels.
    """

    def __init__(self, thresh, lb_ignore=255, factor=16):
        super(OhemCELoss, self).__init__()
        self.factor = factor
        # Keep the threshold on CPU; it is moved to the loss tensor's
        # device in forward(). The original called .cuda() here, which
        # crashed on CPU-only hosts and broke CPU inference.
        self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float))
        self.lb_ignore = lb_ignore
        self.criteria = nn.CrossEntropyLoss(ignore_index=lb_ignore, reduction='none')

    def forward(self, logits, labels):
        """Return the mean CE loss over the selected hard pixels.

        Args:
            logits: (B, C, H, W) raw class scores.
            labels: (B, H, W) integer class indices.
        """
        # Clamp to >= 1: with n_min == 0 and no pixel above the threshold
        # the original returned mean of an empty tensor, i.e. NaN.
        n_min = max(labels[labels != self.lb_ignore].numel() // self.factor, 1)
        loss = self.criteria(logits, labels).view(-1)
        loss_hard = loss[loss > self.thresh.to(loss.device)]
        if loss_hard.numel() < n_min:
            # Not enough hard pixels: take the n_min largest losses instead.
            loss_hard, _ = loss.topk(n_min)
        return torch.mean(loss_hard)










class SigCrossV2(nn.Module):
    """Per-class OHEM-style cross-entropy loss.

    Classes listed in ``label_big`` (presumably the frequent classes —
    confirm against the dataset) are hard-mined: only pixels whose loss
    exceeds ``-log(0.7)`` are kept (with a 1/16-of-class-pixels floor),
    and mining applies only when the class has more than 32 labeled
    pixels. Every other class contributes all of its pixels. The result
    is the mean over all kept pixels.

    Args:
        label_num: number of classes C.
        lb_ignore: label value excluded from the loss.
    """

    def __init__(self, label_num=19, lb_ignore=255):
        super(SigCrossV2, self).__init__()
        self.num_classes = label_num
        self.lb_ignore = lb_ignore
        # A pixel counts as "hard" when its loss exceeds -log(0.7).
        self.thresh = -np.log(0.7)
        # Classes subject to hard mining.
        self.label_big = [0, 2, 8]

    def forward(self, logits, target):
        """Return the scalar loss.

        Args:
            logits: (B, C, H, W) raw class scores.
            target: (B, H, W) integer class indices.
        """
        logits = logits.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
        labels = target.view(-1)
        keep = labels != self.lb_ignore
        logits = logits[keep]
        labels = labels[keep]

        # Manual cross-entropy with probability clipping for numerical
        # stability (log never sees a probability below 1e-4).
        probs = torch.softmax(logits, dim=-1)
        probs = torch.clip(probs, min=1e-4, max=1 - 1e-4)
        one_hot = F.one_hot(labels, self.num_classes)
        loss = torch.sum(-one_hot * torch.log(probs), dim=-1)

        kept = []
        for cls in self.label_big:
            cls_mask = labels == cls
            nb = int(cls_mask.sum())
            # Mine hard pixels only when the class is well represented;
            # classes with <= 32 pixels are skipped entirely here (as in
            # the original implementation).
            if nb > 32:
                cls_loss = loss[cls_mask]
                hard = cls_loss[cls_loss > self.thresh]
                floor = nb // 16
                if hard.numel() < floor:
                    hard, _ = cls_loss.topk(floor)
                kept.append(hard)
        for cls in range(self.num_classes):
            if cls not in self.label_big:
                kept.append(loss[labels == cls])

        total = torch.cat(kept) if kept else loss
        if total.numel() == 0:
            # Mining kept nothing (the original divided by zero here):
            # fall back to the plain mean over all labeled pixels.
            total = loss
        if total.numel() == 0:
            # Every pixel was ignored: return a graph-connected zero.
            return logits.sum() * 0.0
        return torch.mean(total)



if __name__ == '__main__':
    # Smoke-construct the OHEM loss with a 0.7 keep-probability threshold.
    criterion = OhemCELoss(0.7)