import torch
from torch import nn
from torch.nn import  functional as F
import numpy as np
class OhemCELoss(nn.Module):
    """Online hard example mining (OHEM) cross-entropy loss.

    Per-pixel CE losses above ``-log(thresh)`` are treated as hard
    examples; if fewer than ``1/factor`` of the valid pixels qualify,
    the top-k hardest pixels are kept instead.
    """

    def __init__(self, thresh, lb_ignore=255, factor=16):
        super(OhemCELoss, self).__init__()
        self.factor = factor
        # Fix: the original called .cuda() here, which crashes on CPU-only
        # machines. Keep the threshold on CPU and move it lazily in
        # forward(), as OhemCELossCity already does.
        self.thresh = -torch.log(torch.tensor(thresh, requires_grad=False, dtype=torch.float))
        self.lb_ignore = lb_ignore
        self.criteria = nn.CrossEntropyLoss(ignore_index=lb_ignore, reduction='none')

    def forward(self, logits, labels, epoch=0):
        """Compute the OHEM CE loss.

        Args:
            logits: raw class scores, shape (B, C, H, W).
            labels: integer label map, shape (B, H, W).
            epoch: unused; kept for interface compatibility.
        Returns:
            Scalar tensor: mean CE over the selected hard pixels.
        """
        self.thresh = self.thresh.to(logits.device)
        # Clamp to >= 1 so topk()/mean() never operate on an empty
        # selection (the original returned NaN when valid pixels < factor).
        n_min = max(labels[labels != self.lb_ignore].numel() // self.factor, 1)
        loss = self.criteria(logits, labels)
        loss = loss.view(-1)
        loss_hard = loss[loss > self.thresh]
        if loss_hard.numel() < n_min:
            loss_hard, _ = loss.topk(n_min)
        return torch.mean(loss_hard)

class OhemCELossFPV(nn.Module):
    """OHEM cross-entropy loss with fixed per-class re-weighting.

    Identical to ``OhemCELoss`` except that pixels labelled 8 get their
    loss doubled and pixels labelled 6 tripled before hard-example
    selection (hard-coded class boosts for this dataset).
    """

    def __init__(self, thresh, lb_ignore=255, factor=16):
        super(OhemCELossFPV, self).__init__()
        self.factor = factor
        # Fix: drop the hard .cuda() call so the module is constructible
        # without CUDA; the threshold follows the logits' device lazily.
        self.thresh = -torch.log(torch.tensor(thresh, requires_grad=False, dtype=torch.float))
        self.lb_ignore = lb_ignore
        self.criteria = nn.CrossEntropyLoss(ignore_index=lb_ignore, reduction='none')

    def forward(self, logits, labels, epoch=0):
        """Compute the re-weighted OHEM CE loss (epoch is unused)."""
        self.thresh = self.thresh.to(logits.device)
        # Clamp to >= 1 to avoid NaN (mean of empty) when valid < factor.
        n_min = max(labels[labels != self.lb_ignore].numel() // self.factor, 1)
        loss = self.criteria(logits, labels)
        # Hard-coded boosts for under-represented classes 8 and 6.
        if torch.sum(labels == 8) > 0:
            loss[labels == 8] = loss[labels == 8] * 2.0
        if torch.sum(labels == 6) > 0:
            loss[labels == 6] = loss[labels == 6] * 3.0
        loss = loss.view(-1)
        loss_hard = loss[loss > self.thresh]
        if loss_hard.numel() < n_min:
            loss_hard, _ = loss.topk(n_min)
        return torch.mean(loss_hard)

class OhemCELossCity(nn.Module):
    """OHEM cross-entropy loss for Cityscapes-style training.

    Keeps the pixels whose CE loss exceeds ``-log(thresh)``; if fewer
    than ``1/factor`` of the valid pixels are that hard, the top-k
    hardest pixels are kept instead.
    """

    def __init__(self, thresh, lb_ignore=255, factor=16):
        super(OhemCELossCity, self).__init__()
        self.factor = factor
        self.thresh = -torch.log(torch.tensor(thresh, requires_grad=False, dtype=torch.float))
        self.lb_ignore = lb_ignore
        self.criteria = nn.CrossEntropyLoss(ignore_index=lb_ignore, reduction='none')

    def forward(self, logits, labels):
        # Keep the stored threshold on the same device as the logits.
        self.thresh = self.thresh.to(logits.device)
        valid_count = labels[labels != self.lb_ignore].numel()
        n_min = valid_count // self.factor
        pixel_loss = self.criteria(logits, labels).view(-1)
        hard = pixel_loss[pixel_loss > self.thresh]
        if hard.numel() < n_min:
            hard, _ = pixel_loss.topk(n_min)
        return hard.mean()

class OhemCELossCityNew(nn.Module):
    """OHEM cross-entropy loss that up-weights rare classes (label > 3).

    When enough rare-class pixels exist, the hard-example sum is
    weighted 4x and combined with the rare-class pixel losses; otherwise
    it degenerates to plain OHEM.
    """

    def __init__(self, thresh=0.7, lb_ignore=255, factor=16):
        super(OhemCELossCityNew, self).__init__()
        self.factor = factor
        # Fix: no hard .cuda() call — the threshold is moved to the
        # logits' device in forward(), so CPU-only use works too.
        self.thresh = -torch.log(torch.tensor(thresh, requires_grad=False, dtype=torch.float))
        self.lb_ignore = lb_ignore
        self.criteria = nn.CrossEntropyLoss(ignore_index=lb_ignore, reduction='none')

    def forward(self, logits, labels):
        self.thresh = self.thresh.to(logits.device)
        # Clamp to >= 1 to avoid a NaN mean over an empty topk selection.
        n_min = max(labels[labels != self.lb_ignore].numel() // self.factor, 1)
        loss = self.criteria(logits, labels)
        # Rare-class pixels (label > 3) are tracked separately.
        nb = labels[labels > 3].numel()
        if nb > 3:
            little_sample = loss[labels > 3]

        loss = loss.view(-1)
        loss_hard = loss[loss > self.thresh]
        if loss_hard.numel() < n_min:
            loss_hard, _ = loss.topk(n_min)

        if nb > 3:
            # Hard pixels weighted 4x against the rare-class pixels.
            total_loss = torch.sum(loss_hard) * 4 + torch.sum(little_sample)
            return total_loss / (nb + loss_hard.numel())
        else:
            return torch.mean(loss_hard)



class OhemCELossFpv(nn.Module):
    """OHEM-style loss driven by the rare-class pixel count.

    Takes the top ``10 * nb`` hardest pixels (nb = number of pixels with
    label > 3), adds the rare-class pixel losses, and averages.
    """

    def __init__(self, lb_ignore=255, factor=16):
        super(OhemCELossFpv, self).__init__()
        self.factor = factor
        self.lb_ignore = lb_ignore
        self.criteria = nn.CrossEntropyLoss(ignore_index=lb_ignore, reduction='none')

    def forward(self, logits, labels, epoch):
        """Compute the loss (epoch is unused; kept for compatibility)."""
        loss = self.criteria(logits, labels)
        nb = labels[labels > 3].numel()
        little_sample = loss[labels > 3]
        loss = loss.view(-1)
        if nb == 0:
            # Fix: with no rare-class pixels the original divided by
            # zero (NaN). Fall back to a plain mean over all pixels.
            return torch.mean(loss)
        # Fix: topk() raises if k exceeds the number of elements, which
        # happened whenever nb * 10 > number of pixels. Cap k.
        n_min = min(nb * 10, loss.numel())
        loss_hard, _ = loss.topk(n_min)

        total_loss = torch.sum(loss_hard) + torch.sum(little_sample)
        return total_loss / (nb + loss_hard.numel())



class CosCELoss(nn.Module):
    """Curriculum cross-entropy loss with a cosine-annealed split.

    Pixels are sorted by loss; a factor annealed from ~1 down to 0.05
    over ``max_epoch`` epochs weights the easy fraction early in
    training and shifts weight to the hard fraction later.
    """

    def __init__(self, thresh, lb_ignore=255, max_epoch=150):
        super(CosCELoss, self).__init__()
        self.max_epoch = max_epoch
        # Kept for API compatibility; not used in forward(). Fix: no
        # .cuda() call, so construction works on CPU-only machines.
        self.thresh = -torch.log(torch.tensor(thresh, requires_grad=False, dtype=torch.float))
        self.lb_ignore = lb_ignore
        self.criteria = nn.CrossEntropyLoss(ignore_index=lb_ignore, reduction='none')

    def forward(self, logits, labels, epoch):
        n_min = labels[labels != self.lb_ignore].numel()
        loss = self.criteria(logits, labels)
        loss = loss.view(-1)
        sorted_loss, _ = torch.sort(loss)
        # Fix: the original computed cos(progress) * pi/2, which stays in
        # ~[0.85, 1.57] and is always clipped to a_max — the annealing
        # never happened. The intended schedule is cos(progress * pi/2),
        # going from 1 at epoch 0 to 0 (floored at 0.05) at max_epoch.
        progress = min(epoch, self.max_epoch) / self.max_epoch
        factor = np.clip(np.cos(progress * np.pi / 2), a_min=0.05, a_max=1 - 2 / n_min)

        # Guard the easy slice: at small factor int(n_min*factor) can be
        # 0 and mean() over an empty slice would be NaN.
        k = int(n_min * factor)
        if k > 0:
            loss_easy = factor * torch.mean(sorted_loss[0:k])
        else:
            loss_easy = sorted_loss.new_zeros(())
        loss_hard = (1 - factor) * torch.mean(sorted_loss[k:])

        return loss_easy + loss_hard



class SigLossCity(nn.Module):
    """Combined sigmoid + softmax cross-entropy pixel loss with OHEM.

    For each valid pixel, a symmetric focal-style sigmoid term over all
    classes is added to the standard softmax CE term; pixels with loss
    above the threshold (here -log(1.0) == 0, i.e. any positive loss)
    count as hard, with a top-k fallback.
    """

    def __init__(self, label_num=19, lb_ignore=255):
        super(SigLossCity, self).__init__()
        self.num_classes = label_num
        self.lb_ignore = lb_ignore
        # Fix: no hard .cuda() call — the threshold (0.0) is moved to the
        # logits' device lazily in forward().
        self.thresh = -torch.log(torch.tensor(1.0, requires_grad=False, dtype=torch.float))

    def forward(self, logits, target):
        self.thresh = self.thresh.to(logits.device)
        n_min = target[target != self.lb_ignore].numel() // 16
        labels = torch.clone(target)
        # (B, C, H, W) -> (B*H*W, C) so pixels can be filtered flat.
        logits = logits.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
        labels = labels.view(-1)

        logits = logits[labels != self.lb_ignore]
        labels = labels[labels != self.lb_ignore]

        labels = F.one_hot(labels, self.num_classes)
        out = torch.sigmoid(logits)
        out = torch.clip(out, min=1e-4, max=1 - 1e-4)
        # Symmetric focal-style binary term, summed over classes.
        sig_loss = -labels * torch.log(out) * (1 - out) - (1 - labels) * torch.log(1 - out) * out
        sig_loss = torch.sum(sig_loss, dim=-1)

        # Standard softmax cross-entropy term.
        out = torch.softmax(logits, dim=-1)
        out = torch.clip(out, min=1e-4, max=1 - 1e-4)
        cre_loss = -labels * torch.log(out)
        cre_loss = torch.sum(cre_loss, dim=-1)
        loss = sig_loss + cre_loss

        loss = loss.view(-1)
        loss_hard = loss[loss > self.thresh]
        if loss_hard.numel() < n_min:
            loss_hard, _ = loss.topk(n_min)
        return torch.mean(loss_hard)



class SigCross(nn.Module):
    """Sigmoid-focal + softmax CE pixel loss with OHEM and rare-class
    re-weighting: class-3 pixels are doubled (when more than 5 exist)
    and pixels of classes > 3 are weighted 4x in the final average.
    """

    def __init__(self, label_num=19, lb_ignore=255):
        super(SigCross, self).__init__()
        self.num_classes = label_num
        self.lb_ignore = lb_ignore
        #self.thresh = -torch.log(torch.tensor(0.7, requires_grad=False, dtype=torch.float)).cuda()
        self.thresh = -np.log(0.7)

    def forward(self, logits, target):
        n_min = target[target != self.lb_ignore].numel() // 16

        # Flatten to one row of class scores per pixel; drop ignored pixels.
        flat_logits = logits.permute(0, 2, 3, 1).reshape(-1, self.num_classes)
        flat_labels = torch.clone(target).view(-1)
        keep = flat_labels != self.lb_ignore
        flat_logits = flat_logits[keep]
        flat_labels = flat_labels[keep]

        one_hot = F.one_hot(flat_labels, self.num_classes)

        # Symmetric focal-style sigmoid term, summed over classes.
        probs = torch.clip(torch.sigmoid(flat_logits), min=1e-4, max=1 - 1e-4)
        sig_term = -one_hot * torch.log(probs) * (1 - probs) - (1 - one_hot) * torch.log(1 - probs) * probs
        sig_term = torch.sum(sig_term, dim=-1)

        # Standard softmax cross-entropy term.
        soft = torch.clip(torch.softmax(flat_logits, dim=-1), min=1e-4, max=1 - 1e-4)
        ce_term = torch.sum(-one_hot * torch.log(soft), dim=-1)

        pixel_loss = (sig_term + ce_term).view(-1)

        # Double the loss on class-3 pixels when enough of them exist.
        class3 = flat_labels == 3
        if class3.sum().item() > 5:
            pixel_loss[class3] = pixel_loss[class3] * 2.0
        rare = pixel_loss[flat_labels > 3]

        hard = pixel_loss[pixel_loss > self.thresh]
        if hard.numel() < n_min:
            hard, _ = pixel_loss.topk(n_min)
        # Rare-class pixels weighted 4x; +5.0 smooths the denominator.
        total = torch.sum(hard) + torch.sum(rare) * 4
        return total / (rare.numel() + hard.numel() + 5.0)







class FocalCity(nn.Module):
    """Per-class sigmoid focal-style loss over dense label maps.

    Ignored pixels are mapped to an extra one-hot slot that is sliced
    away, leaving them with an all-zero target vector (treated as pure
    negatives for every class).
    """

    def __init__(self, label_num=19, lb_ignore=255):
        super(FocalCity, self).__init__()
        self.num_classes = label_num
        self.lb_ignore = lb_ignore

    def forward(self, logits, target):
        # One-hot encode with a throwaway slot for the ignore label.
        labels = torch.clone(target)
        labels[labels == self.lb_ignore] = self.num_classes
        one_hot = F.one_hot(labels, self.num_classes + 1)[:, :, :, 0:self.num_classes]
        one_hot = one_hot.permute(0, 3, 1, 2).contiguous().float()

        probs = torch.clip(torch.sigmoid(logits), min=1e-6, max=1 - 1e-6)
        focal = -one_hot * torch.log(probs) * (1 - probs) - (1 - one_hot) * torch.log(1 - probs) * probs
        return torch.sum(focal, dim=1).mean()

class FocalLoss(nn.Module):
    """Classic focal loss built on softmax cross-entropy.

    Down-weights well-classified pixels by ``(1 - p_t) ** gamma`` and
    scales the result by ``alpha``.
    """

    def __init__(self, lb_ignore=255, gamma=2, alpha=1):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.label_ignore = lb_ignore

    def forward(self, inputs, targets):
        per_pixel_ce = F.cross_entropy(inputs, targets, reduction='none', ignore_index=self.label_ignore)
        # p_t = predicted probability of the true class.
        p_t = torch.exp(-per_pixel_ce)
        modulated = (1 - p_t) ** self.gamma * per_pixel_ce
        return torch.mean(modulated * self.alpha)


def dice_loss(probs, target):
    """Soft Dice loss, averaged over the batch and channel dimensions.

    ``probs`` and ``target`` are (B, C, H, W); eps=1 smooths both the
    numerator and denominator.
    """
    eps = 1
    target_f = target.float()
    # Per-(batch, channel) sums over the two spatial dimensions.
    intersection = torch.sum(probs * target_f, dim=(2, 3))
    probs_sq = torch.sum(probs * probs, dim=(2, 3))
    target_sq = torch.sum(target_f * target_f, dim=(2, 3))
    dice = (2 * intersection + eps) / (probs_sq + target_sq + eps)
    return 1 - dice.mean()
def sg_loss(x1, target):
    """Focal-style sigmoid loss normalized by the positive-pixel count.

    ``target`` gains a channel dimension so it broadcasts against the
    logits ``x1``; +10 keeps the denominator sane with few positives.
    """
    pos = torch.unsqueeze(target, 1)
    prob = torch.clip(torch.sigmoid(x1), min=1e-6, max=1 - 1e-6)
    focal = -pos * torch.log(prob) * (1 - prob) ** 2 - (1 - pos) * torch.log(1 - prob) * prob ** 2
    return focal.sum() / (pos.sum() + 10.0)

def cross_loss(xs, target):
    """Cross-entropy against the argmax of a one-hot / soft target map."""
    hard_target = torch.argmax(target, dim=1)
    return F.cross_entropy(xs, hard_target, label_smoothing=0.0, reduction='mean')










def aux_loss(xs, target):
    """Auxiliary-head loss over a list of 3-channel predictions.

    Channels >= 3 of ``target`` are folded into channel 0, the map is
    truncated to 3 channels, then each prediction in ``xs`` pays a
    focal-style sigmoid term (scaled by 0.1) plus a Dice term.
    """
    merged = target.clone()
    # Fold every channel from index 3 onward into channel 0.
    merged[:, 0, :, :] = merged[:, 0, :, :] + torch.sum(merged[:, 3:, :, :], dim=1, keepdim=False)
    merged = merged[:, 0:3, :, :]
    total = 0
    for x in xs:
        prob = torch.clip(torch.sigmoid(x), min=1e-6, max=1 - 1e-6)
        focal = -merged * torch.log(prob) * (1 - prob) ** 2 - (1 - merged) * torch.log(1 - prob) * prob ** 2
        total = total + focal.mean() * 0.1 + dice_loss(prob, merged)
    return total


if __name__ == '__main__':
    # Smoke test: construct the default OHEM loss with thresh=0.7.
    md = OhemCELoss(0.7)