import torch
from torch import nn
from torch.nn import  functional as F
import numpy as np



class SigCross(nn.Module):
    """Combined sigmoid (focal-style) + softmax cross-entropy loss with
    per-class online hard-example mining (OHEM).

    Classes 0..2 are mined individually: only pixels whose loss exceeds
    ``thresh`` are kept, with a floor of ``nb // 16`` pixels per class.
    Pixels of every other class are always kept and up-weighted by 3.

    Args:
        label_num: number of classes C in the logits.
        lb_ignore: label value excluded from the loss.
    """

    def __init__(self, label_num=19, lb_ignore=255):
        super(SigCross, self).__init__()
        self.num_classes = label_num
        self.lb_ignore = lb_ignore
        # Hard-example threshold: the loss of a pixel predicted with
        # probability 0.7 for its true class.
        self.thresh = -np.log(0.7)

    def forward(self, logits, target):
        """Compute the mined loss.

        Args:
            logits: (B, C, H, W) raw class scores.
            target: (B, H, W) integer labels in [0, C) or ``lb_ignore``.

        Returns:
            Scalar tensor: mean loss over the mined pixels (0 if none survive).
        """
        # Flatten to (B*H*W, C) / (B*H*W,) and drop ignored pixels.
        logits = logits.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
        labels = target.reshape(-1)
        keep = labels != self.lb_ignore
        logits = logits[keep]
        labels = labels[keep]
        one_hot_labels = F.one_hot(labels, self.num_classes)

        # Sigmoid branch with focal-style modulation: positives weighted by
        # (1 - p), negatives by p, summed over the class dimension.
        out = torch.sigmoid(logits)
        out = torch.clip(out, min=1e-4, max=1 - 1e-4)
        sig_loss = (-one_hot_labels * torch.log(out) * (1 - out)
                    - (1 - one_hot_labels) * torch.log(1 - out) * out)
        sig_loss = torch.sum(sig_loss, dim=-1)

        # Softmax cross-entropy branch.
        probs = torch.softmax(logits, dim=-1)
        probs = torch.clip(probs, min=1e-4, max=1 - 1e-4)
        cre_loss = torch.sum(-one_hot_labels * torch.log(probs), dim=-1)

        loss = sig_loss + cre_loss

        # Per-class OHEM for classes 0..2: keep pixels above the threshold,
        # but never fewer than nb // 16 of them.
        loss_list = []
        for i in range(3):
            cls_mask = labels == i
            nb = int(cls_mask.sum())
            if nb > 32:
                lb_min = nb // 16
                lb_loss = loss[cls_mask]
                lb_loss_hard = lb_loss[lb_loss > self.thresh]
                if lb_loss_hard.numel() < lb_min:
                    lb_loss_hard, _ = lb_loss.topk(lb_min)
                loss_list.append(lb_loss_hard)
        # Remaining ("little sample") classes are always kept, weighted x3.
        loss_list.append(loss[labels > 2] * 3)

        total_loss = 0
        total_nb = 0
        for part in loss_list:
            total_loss += torch.sum(part)
            total_nb += part.numel()
        # BUGFIX: the old code divided by zero when no pixel survived mining
        # (e.g. an all-ignore target). Return a graph-connected zero instead.
        if total_nb == 0:
            return logits.sum() * 0.0
        return total_loss / total_nb



class SigCrossV2(nn.Module):
    """Same combined sigmoid + cross-entropy OHEM loss as ``SigCross``, except
    the sigmoid branch masks out class 0, so class 0 is supervised only by
    the softmax cross-entropy term.

    Args:
        label_num: number of classes C in the logits.
        lb_ignore: label value excluded from the loss.
    """

    def __init__(self, label_num=19, lb_ignore=255):
        super(SigCrossV2, self).__init__()
        self.num_classes = label_num
        self.lb_ignore = lb_ignore
        # Hard-example threshold: per-pixel loss at predicted probability 0.7.
        self.thresh = -np.log(0.7)

    def forward(self, logits, target):
        """Compute the mined loss.

        Args:
            logits: (B, C, H, W) raw class scores.
            target: (B, H, W) integer labels in [0, C) or ``lb_ignore``.

        Returns:
            Scalar tensor: mean loss over the mined pixels (0 if none survive).
        """
        # Flatten to (B*H*W, C) / (B*H*W,) and drop ignored pixels.
        logits = logits.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
        labels = target.reshape(-1)
        keep = labels != self.lb_ignore
        logits = logits[keep]
        labels = labels[keep]
        one_hot_labels = F.one_hot(labels, self.num_classes)

        # Sigmoid branch with focal-style modulation.
        out = torch.sigmoid(logits)
        out = torch.clip(out, min=1e-4, max=1 - 1e-4)
        sig_loss = (-one_hot_labels * torch.log(out) * (1 - out)
                    - (1 - one_hot_labels) * torch.log(1 - out) * out)
        # BUGFIX: build the class-0 mask from the *filtered* per-pixel loss.
        # The old code sized the mask (B*W*H, C) before dropping ignored
        # pixels, which crashed with a shape mismatch whenever any pixel
        # carried lb_ignore.
        mask = torch.ones_like(sig_loss)
        mask[:, 0] = 0.0
        sig_loss = torch.sum(sig_loss * mask, dim=-1)

        # Softmax cross-entropy branch.
        probs = torch.softmax(logits, dim=-1)
        probs = torch.clip(probs, min=1e-4, max=1 - 1e-4)
        cre_loss = torch.sum(-one_hot_labels * torch.log(probs), dim=-1)

        loss = sig_loss + cre_loss

        # Per-class OHEM for classes 0..2: keep pixels above the threshold,
        # but never fewer than nb // 16 of them.
        loss_list = []
        for i in range(3):
            cls_mask = labels == i
            nb = int(cls_mask.sum())
            if nb > 32:
                lb_min = nb // 16
                lb_loss = loss[cls_mask]
                lb_loss_hard = lb_loss[lb_loss > self.thresh]
                if lb_loss_hard.numel() < lb_min:
                    lb_loss_hard, _ = lb_loss.topk(lb_min)
                loss_list.append(lb_loss_hard)
        # Remaining ("little sample") classes are always kept, weighted x3.
        loss_list.append(loss[labels > 2] * 3)

        total_loss = 0
        total_nb = 0
        for part in loss_list:
            total_loss += torch.sum(part)
            total_nb += part.numel()
        # BUGFIX: avoid division by zero when no pixel survives mining.
        if total_nb == 0:
            return logits.sum() * 0.0
        return total_loss / total_nb



class SigCrossV3(nn.Module):
    """Softmax cross-entropy loss with per-class online hard-example mining.

    Pixels of classes 0..2 are mined per class (keep losses above ``thresh``,
    floored at ``count // 16`` pixels); pixels of any higher class are always
    kept with a 3x weight. Pixels labelled ``lb_ignore`` are skipped.
    """

    def __init__(self, label_num=19, lb_ignore=255):
        super().__init__()
        self.num_classes = label_num
        self.lb_ignore = lb_ignore
        # Loss value of a pixel predicted at probability 0.7 for its class.
        self.thresh = -np.log(0.7)

    def forward(self, logits, target):
        """Return the scalar mean loss for (B, C, H, W) logits and (B, H, W)
        integer labels."""
        # Flatten and keep only the non-ignored pixels.
        flat_labels = target.reshape(-1)
        valid = flat_labels != self.lb_ignore
        flat_logits = logits.permute(0, 2, 3, 1).reshape(-1, self.num_classes)[valid]
        flat_labels = flat_labels[valid]

        # Per-pixel softmax cross-entropy (probabilities clipped for log safety).
        probs = torch.softmax(flat_logits, dim=-1).clip(min=1e-4, max=1 - 1e-4)
        onehot = F.one_hot(flat_labels, self.num_classes)
        pixel_loss = (-onehot * probs.log()).sum(dim=-1)

        # Mine classes 0..2 individually.
        kept = []
        for cls in (0, 1, 2):
            cls_mask = flat_labels == cls
            count = int(cls_mask.sum())
            if count > 32:
                cls_loss = pixel_loss[cls_mask]
                hard = cls_loss[cls_loss > self.thresh]
                floor = count // 16
                if hard.numel() < floor:
                    hard = cls_loss.topk(floor).values
                kept.append(hard)
        # Every remaining class is kept outright, up-weighted by 3.
        kept.append(pixel_loss[flat_labels > 2] * 3)

        total = 0
        count = 0
        for chunk in kept:
            total += chunk.sum()
            count += chunk.numel()
        return total / count









if __name__ == '__main__':
    # BUGFIX: the first positional argument of SigCross is label_num (the
    # class count); passing 0.7 made num_classes a float and would break
    # F.one_hot in forward(). Use the defaults for a smoke-construction.
    md = SigCross()