import torch
import numpy as np
import torch.nn as nn
import math
from functools import partial
from torch.nn import *
import torch.nn.functional as F
from torchvision import models
from torch.autograd import Variable
from torch.optim.lr_scheduler import _LRScheduler
from segmentation_models_pytorch.utils import losses
from .LovaszSoftmax.pytorch.lovasz_losses import lovasz_hinge

def l2_norm(input, axis=1):
    """Scale `input` to unit L2 norm along `axis` (keeps dims for broadcast)."""
    magnitude = torch.norm(input, 2, axis, True)
    return torch.div(input, magnitude)

def weights_init_kaiming(m):
    """Kaiming-normal init for Linear/Conv weights, unit-gain for BatchNorm.

    Intended to be passed to ``model.apply``; modules are matched by class name.
    """
    layer_name = m.__class__.__name__
    if "Linear" in layer_name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        nn.init.constant_(m.bias, 0.0)
    elif "Conv" in layer_name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif "BatchNorm" in layer_name:
        # Only affine BatchNorm layers carry learnable weight/bias.
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)

def weights_init_classifier(m):
    """Initialize classifier Linear layers: small-std normal weights, zero bias.

    Intended to be passed to ``model.apply``; non-Linear modules are ignored.
    """
    classname = m.__class__.__name__
    if classname.find("Linear") != -1:
        nn.init.normal_(m.weight, std=0.001)
        # Bug fix: `if m.bias:` calls bool() on a multi-element tensor and
        # raises RuntimeError; the presence check must be `is not None`
        # (matching the Conv branch of weights_init_kaiming).
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)

def try_bestfitting_loss(results, labels, selected_num=10, new_whale=5004):
    """Lovasz-hinge loss over one-hot targets plus a BCE pull-up term on the
    true-class logits of the non-"new whale" samples.

    Args:
        results: (batch, class_num) raw logits.
        labels: (batch,) integer class ids; ``new_whale`` marks the catch-all
            class that is excluded from the BCE term.
        selected_num: kept for backward compatibility (unused here).
        new_whale: id of the catch-all class (was a hard-coded 5004).

    Returns:
        Scalar loss tensor.
    """
    batch_size, class_num = results.shape
    labels = labels.view(-1, 1)
    # One spare column so `new_whale` labels scatter into it, then drop it.
    # Allocate on results' device instead of hard-coded .cuda() so CPU runs work.
    one_hot_target = torch.zeros(
        batch_size, class_num + 1, device=results.device
    ).scatter_(1, labels, 1)[:, :new_whale].contiguous()
    error_loss = lovasz_hinge(results, one_hot_target)
    labels = labels.view(-1)
    indexs_new = (labels != new_whale).nonzero().view(-1)
    if len(indexs_new) == 0:
        return error_loss
    results_nonew = results[torch.arange(0, len(results))[indexs_new], labels[indexs_new]].contiguous()
    # ones_like already inherits results' device; the old extra .cuda() broke CPU use.
    target_nonew = torch.ones_like(results_nonew).float()
    # `reduce=True` is deprecated; reduction='mean' is the exact equivalent.
    nonew_loss = nn.BCEWithLogitsLoss(reduction='mean')(results_nonew, target_nonew)
    return nonew_loss + error_loss

class FocalLoss(nn.Module):
    """Multi-class focal loss (https://arxiv.org/abs/1708.02002) with optional
    per-class alpha weighting and label smoothing.

    Args:
        class_num: number of classes C.
        alpha: per-class weights (length C); used only when ``use_alpha``.
        gamma: focusing exponent; gamma=0 reduces to (smoothed) cross-entropy.
        use_alpha: enable the alpha weighting term.
        size_average: mean over the batch if True, else sum.
        smooth: label-smoothing factor in [0, 1], or None to disable.
    """

    def __init__(self, class_num, alpha=None, gamma=2, use_alpha=False, size_average=True, smooth=None):
        super(FocalLoss, self).__init__()
        self.class_num = class_num
        self.alpha = alpha
        self.gamma = gamma
        if use_alpha:
            # Keep alpha on CPU here and move it to pred's device in forward();
            # the old unconditional .cuda() crashed on CPU-only machines
            # (CELoss already guarded this — made consistent).
            self.alpha = torch.tensor(alpha)

        self.softmax = nn.Softmax(dim=1)
        self.use_alpha = use_alpha
        self.size_average = size_average
        self.smooth = smooth
        if self.smooth is not None:
            if self.smooth < 0 or self.smooth > 1.0:
                raise ValueError('smooth value should be in [0,1]')

    def forward(self, pred, target):
        """pred: (N, C) logits; target: (N,) class ids or (N, C) one-hot."""
        prob = self.softmax(pred.view(-1, self.class_num))
        # Clamp keeps log() finite and bounds the gradient of near-0 probs.
        prob = prob.clamp(min=0.0001, max=1.0)

        if pred.shape == target.shape:
            target_ = target
        else:
            # Allocate the one-hot buffer on pred's device — the old
            # cuda.is_available() check mismatched CPU preds on CUDA machines.
            target_ = torch.zeros(target.size(0), self.class_num, device=pred.device)
            target_.scatter_(1, target.view(-1, 1).long(), 1.)

        if self.smooth:
            target_ = (1.0 - self.smooth) * target_
            target_ = target_ + self.smooth / self.class_num

        if self.use_alpha:
            alpha = self.alpha.to(pred.device)
            batch_loss = - alpha.double() * torch.pow(1 - prob, self.gamma).double() * prob.log().double() * target_.double()
        else:
            batch_loss = - torch.pow(1 - prob, self.gamma).double() * prob.log().double() * target_.double()

        batch_loss = batch_loss.sum(dim=1)

        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()

        return loss

class CELoss(nn.Module):
    """Softmax cross-entropy with optional per-class alpha weighting and
    label smoothing.

    Args:
        class_num: number of classes C.
        alpha: per-class weights (length C); used only when ``use_alpha``.
        use_alpha: enable the alpha weighting term.
        size_average: mean over the batch if True, else sum.
        smooth: label-smoothing factor in [0, 1], or None to disable.
    """

    def __init__(self, class_num, alpha=None, use_alpha=False, size_average=True, smooth=None):
        super(CELoss, self).__init__()
        self.class_num = class_num
        self.alpha = alpha
        if use_alpha:
            # Keep alpha on CPU; it is moved to pred's device in forward(),
            # so the module works on either device without a global CUDA check.
            self.alpha = torch.tensor(alpha)

        self.softmax = nn.Softmax(dim=1)
        self.use_alpha = use_alpha
        self.size_average = size_average
        self.smooth = smooth

        if self.smooth is not None:
            if self.smooth < 0 or self.smooth > 1.0:
                raise ValueError('smooth value should be in [0,1]')

    def forward(self, pred, target):
        """pred: (N, C) logits; target: (N,) class ids or (N, C) one-hot."""
        prob = self.softmax(pred.view(-1, self.class_num))
        # Clamp keeps log() finite for near-zero probabilities.
        prob = prob.clamp(min=0.0001, max=1.0)

        if pred.shape == target.shape:
            target_ = target
        else:
            # One-hot targets on pred's device — the old cuda.is_available()
            # check mismatched CPU preds on CUDA machines.
            target_ = torch.zeros(target.size(0), self.class_num, device=pred.device)
            target_.scatter_(1, target.view(-1, 1).long(), 1.)

        if self.smooth:
            target_ = (1.0 - self.smooth) * target_
            target_ = target_ + self.smooth / self.class_num

        if self.use_alpha:
            alpha = self.alpha.to(pred.device)
            batch_loss = - alpha.double() * prob.log().double() * target_.double()
        else:
            batch_loss = - prob.log().double() * target_.double()

        batch_loss = batch_loss.sum(dim=1)

        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()

        return loss

class Arcface(Module):
    """ArcFace head: additive angular margin on cosine logits
    (https://arxiv.org/abs/1801.05599).

    ``forward`` returns scaled logits where the target-class logit has the
    margin ``m`` added to its angle; feed the output to a softmax-based loss.
    """

    def __init__(self, embedding_size=512, classnum=51332, s=64., m=0.5):
        super(Arcface, self).__init__()
        self.classnum = classnum
        self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
        # Same kernel init scheme as the reference implementation.
        self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.m = m  # angular margin
        self.s = s  # logit scale (normface, https://arxiv.org/abs/1704.06369)
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.mm = self.sin_m * m  # cosface-style fallback offset
        self.threshold = math.cos(math.pi - m)

    def forward(self, embbedings, label):
        """embbedings: (N, E) features (assumed L2-normalized — TODO confirm
        against caller); label: (N,) class ids. Returns (N, classnum) logits."""
        kernel_norm = l2_norm(self.kernel, axis=0)
        cos_theta = torch.mm(embbedings, kernel_norm).clamp(-1, 1)  # numerical stability
        sin_theta = torch.sqrt(1 - torch.pow(cos_theta, 2))
        # cos(theta + m) via the angle-addition identity.
        cos_theta_m = cos_theta * self.cos_m - sin_theta * self.sin_m
        # Keep theta + m inside [0, pi]: where cos_theta falls below the
        # threshold, substitute the cosface-style value cos(theta) - mm.
        outside = (cos_theta - self.threshold) <= 0
        cos_theta_m = torch.where(outside, cos_theta - self.mm, cos_theta_m)
        # Copy so cos_theta itself is never modified in place.
        output = cos_theta * 1.0
        valid = torch.nonzero(label < self.classnum).view(-1)
        output[valid, label[valid]] = cos_theta_m[valid, label[valid]]
        return output * self.s

    def inter(self, embbedings):
        """Plain scaled cosine logits (no margin), e.g. for inference."""
        kernel_norm = l2_norm(self.kernel, axis=0)
        cos_theta = torch.mm(embbedings, kernel_norm).clamp(-1, 1)
        return cos_theta * self.s

class wnfc(Module):
    """Weight-normalized fully-connected head: scaled cosine logits.

    Margin-free variant of the ArcFace head above.
    """

    def __init__(self, embedding_size=512, classnum=51332, s=16.):
        super(wnfc, self).__init__()
        self.classnum = classnum
        self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
        # Same kernel init scheme as the ArcFace head.
        self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.s = s  # logit scale

    def forward(self, embbedings):
        """embbedings: (N, E) features -> (N, classnum) scaled cosine logits."""
        kernel_norm = l2_norm(self.kernel, axis=0)
        cosine = torch.mm(embbedings, kernel_norm).clamp(-1, 1)  # numerical stability
        return cosine * self.s

def get_activation_fn(activation: str = None):
    """Look up an activation module from ``torch.nn`` by name.

    ``None`` or the string ``"none"`` (any case) yields the identity function.
    """
    if activation is not None and activation.lower() != "none":
        return torch.nn.__dict__[activation]()
    return lambda x: x  # noqa: E731

def dice(
    outputs: torch.Tensor,
    targets: torch.Tensor,
    eps: float = 1e-7,
    threshold: float = None,
    activation: str = "Sigmoid",
):
    """Compute the Dice score between ``outputs`` and ``targets``.

    Args:
        outputs: predicted tensor (logits unless ``activation`` is "none").
        targets: ground-truth tensor of the same shape.
        eps: numerical-stability epsilon.
        threshold: if given, binarize the activated outputs at this value.
        activation: name of a ``torch.nn`` activation, or None / "none".

    Returns:
        Scalar Dice score; equals 1 when both tensors sum to zero.
    """
    outputs = get_activation_fn(activation)(outputs)

    if threshold is not None:
        outputs = (outputs > threshold).float()

    intersection = torch.sum(targets * outputs)
    union = torch.sum(targets) + torch.sum(outputs)
    # The `eps * (union == 0)` term makes Dice == 1 when both I and U are 0,
    # while leaving the score ~0 when U != 0 but I == 0 (the eps in the
    # numerator is effectively zeroed out relative to the denominator).
    return (2 * intersection + eps * (union == 0)) / (union + eps)

class DiceLoss(nn.Module):
    """Loss form of the Dice score: ``1 - dice(logits, targets)``."""

    def __init__(self, eps: float = 1e-7, threshold: float = None):
        super().__init__()
        # Bind the configuration once; forward only supplies the tensors.
        self.loss_fn = partial(dice, eps=eps, threshold=threshold)

    def forward(self, logits, targets):
        return 1 - self.loss_fn(logits, targets)


class BCEDiceLoss(nn.Module):
    """Weighted sum of BCE-with-logits and Dice losses.

    A component whose weight is 0 is never instantiated nor evaluated.
    """

    def __init__(
            self,
            eps: float = 1e-7,
            threshold: float = None,
            bce_weight: float = 0.5,
            dice_weight: float = 0.5,
    ):
        super().__init__()

        if bce_weight == 0 and dice_weight == 0:
            raise ValueError(
                "Both bce_wight and dice_weight cannot be "
                "equal to 0 at the same time."
            )

        self.bce_weight = bce_weight
        self.dice_weight = dice_weight

        # Only build the terms that will actually contribute.
        if bce_weight != 0:
            self.bce_loss = nn.BCEWithLogitsLoss()
        if dice_weight != 0:
            self.dice_loss = DiceLoss(eps=eps, threshold=threshold)

    def forward(self, outputs, targets):
        total = 0
        if self.bce_weight != 0:
            total = total + self.bce_weight * self.bce_loss(outputs, targets)
        if self.dice_weight != 0:
            total = total + self.dice_weight * self.dice_loss(outputs, targets)
        return total

class LovaszHingeLoss(nn.Module):
    """Per-image Lovasz hinge loss wrapper.

    Squeezes a singleton dim 1 from both tensors before calling
    ``lovasz_hinge`` (presumably a channel dim — verify against callers).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        logits = input.squeeze(1)
        labels = target.squeeze(1)
        return lovasz_hinge(logits, labels, per_image=True)


# class CenterLoss(nn.Module):
#     def __init__(self, num_classes, feat_dim):
#         super(CenterLoss, self).__init__()
#         self.num_classes = num_classes
#         self.feat_dim = feat_dim
#         self.centers = torch.nn.Parameter(torch.randn(num_classes, feat_dim))
#         self.classes = torch.arange(self.num_classes).long()
#         self.use_cuda = False

#     def forward(self, y, labels):
#         # print(self.use_cuda)
#         batch_size = y.size(0)
#         # distmat = y**2 + centers**2
#         distmat = torch.pow(y, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
#                   torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
#         # distmat = distmat - 2* dot(y, centers.t()) = (y-centers)**2
#         distmat.addmm_(1, -2, y, self.centers.t())

#         labels = labels.expand(batch_size, self.num_classes)
#         mask = labels.eq(self.classes.expand(batch_size, self.num_classes))

#         dist = distmat * mask.float()
#         loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
#         return loss

#     def cuda(self, device_id=None):
#         self.use_cuda = True
#         self.classes.cuda(device_id)
#         return self._apply(lambda t: t.cuda(device_id))

#     def _apply(self, fn):
#         for module in self.children():
#             module._apply(fn)

#         for param in self._parameters.values():
#             if param is not None:
#                 param.data = fn(param.data)
#                 if param._grad is not None:
#                     param._grad.data = fn(param._grad.data)

#         for key, buf in self._buffers.items():
#             if buf is not None:
#                 self._buffers[key] = fn(buf)

#         self.use_cuda = True
#         self.classes = fn(self.classes)
#         return self

# class CECenterLoss(nn.Module):
#     def __init__(
#             self,
#             # eps: float = 1e-7,
#             class_num,
#             feat_dim: int = 512, 
#             ce_weight: float = 0.9,
#             center_weight: float = 0.1,
#     ):
#         super().__init__()

#         if ce_weight == 0 and center_weight == 0:
#             raise ValueError(
#                 "Both bce_wight and dice_weight cannot be "
#                 "equal to 0 at the same time."
#             )

#         self.ce_weight = ce_weight
#         self.center_weight = center_weight

#         if self.ce_weight != 0:
#             self.ce_loss = CELoss(class_num=class_num)

#         if self.center_weight != 0:
#             self.center_loss = CenterLoss(num_classes=class_num, feat_dim=feat_dim)

#     def forward(self, outputs, targets):
#         if self.ce_weight == 0:
#             return self.center_weight * self.center_loss(outputs, targets)
#         if self.center_weight == 0:
#             return self.ce_weight * self.ce_loss(outputs, targets)

#         ce = self.ce_loss(outputs, targets)
#         center = self.center_loss(outputs, targets)
#         loss = self.ce_weight * ce + self.center_weight * center
#         return loss


class GHMCLoss(nn.Module):
    """GHM Classification Loss.
    Details of the theorem can be viewed in the paper
    "Gradient Harmonized Single-stage Detector".
    https://arxiv.org/abs/1811.05181
    Args:
        class_num: number of classes C.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        class_weight: optional per-class weights (length C), or None.
    """

    def __init__(self, class_num, bins=10, momentum=0.75, use_sigmoid=True, class_weight=None):
        super(GHMCLoss, self).__init__()
        self.bins = bins
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.momentum = momentum
        self.class_num = class_num
        # Bin edges over the gradient-length range [0, 1].
        edges = torch.arange(bins + 1).float() / bins
        # Bug fix: was unconditionally .to('cuda'), crashing CPU-only machines;
        # use the device computed above.
        edges = edges.to(self.device)
        self.register_buffer('edges', edges)
        self.edges[-1] += 1e-6  # make the last bin right-inclusive
        if momentum > 0:
            # Exponential moving average of per-bin counts.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        # Bug fix: torch.Tensor(None) raises, and None is the default —
        # only build the tensor when a weight list is actually supplied.
        self.class_weight = torch.Tensor(class_weight) if class_weight is not None else None
        self.softmax = nn.Softmax(dim=1)
        if not self.use_sigmoid:
            raise NotImplementedError

    def forward(self, pred, target_, label_weight=None, *args, **kwargs):
        """Calculate the GHM-C loss.
        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer.
            target_: binary targets with pred's shape, or a [batch_num]
                tensor of class indices (converted to one-hot here).
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored;
                defaults to all-ones.
        Returns:
            The gradient harmonized loss.
        """
        if pred.shape == target_.shape:
            target = target_
            y = self.softmax(target_.view(-1, self.class_num))
            y = y.clamp(min=0.0001, max=1.0)
        else:
            y = target_
            # One-hot buffer on pred's device (was keyed off global CUDA
            # availability, mismatching CPU preds on CUDA machines).
            target = torch.zeros(target_.size(0), self.class_num, device=pred.device)
            target.scatter_(1, target_.view(-1, 1).long(), 1.)

        if label_weight is None:
            # All samples valid by default; built directly in torch on pred's
            # device (was a numpy round-trip plus a hard-coded .to('cuda')).
            label_weight = torch.ones_like(pred)
        else:
            label_weight = label_weight.to(pred.device)

        edges = self.edges.to(pred.device)
        mmt = self.momentum
        # Per-element weights on pred's device (was hard-coded 'cuda:0').
        weights = torch.zeros_like(pred).float()

        # Gradient length: |sigmoid(pred) - target|, detached.
        g = torch.abs(pred.sigmoid().detach() - target)

        valid = label_weight > 0
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # number of non-empty bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                if mmt > 0:
                    # EMA of the bin population; weight = tot / density.
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
                n += 1
        if n > 0:
            weights = weights / n

        # Bug fix: was an unconditional .half(), which broke fp32 runs;
        # match pred's dtype instead (still half under AMP).
        weights = weights.to(pred.dtype)
        loss = F.binary_cross_entropy_with_logits(
            pred, target, weights, reduction='sum') / tot

        if self.class_weight is not None:
            # NOTE(review): when targets are one-hot, `y` holds float softmax
            # probs — indexing class_weight with it would fail; verify callers
            # pass index targets whenever class_weight is set.
            weight = self.class_weight[y].view((1, pred.shape[0]))
            loss = loss * weight.to(self.device)

        return loss.mean()

class WeightedCrossEntropyLoss(nn.Module):
    """Softmax cross-entropy with optional per-class sample weighting."""

    def __init__(self, class_num, class_weight=None, size_average=True):
        super(WeightedCrossEntropyLoss, self).__init__()
        self.class_num = class_num
        # None disables weighting entirely.
        self.class_weight = None if class_weight is None else torch.Tensor(class_weight)
        self.softmax = nn.Softmax(dim=1)
        self.size_average = size_average

    def forward(self, pred, target):
        """pred: (N, C) logits; target: (N,) class ids or (N, C) one-hot."""
        # Clamp keeps log() finite for near-zero probabilities.
        probs = self.softmax(pred.view(-1, self.class_num)).clamp(min=0.0001, max=1.0)

        if pred.shape == target.shape:
            one_hot = target
        else:
            # Build one-hot targets, on CUDA when pred lives there.
            if pred.is_cuda:
                one_hot = torch.zeros(target.size(0), self.class_num).cuda()
            else:
                one_hot = torch.zeros(target.size(0), self.class_num)
            one_hot.scatter_(1, target.view(-1, 1).long(), 1.)

        per_sample = (- probs.log().float() * one_hot.float()).sum(dim=1)

        if self.class_weight is not None:
            # Per-sample weight looked up by target class id.
            sample_weight = self.class_weight[target].view((1, pred.shape[0]))
            if pred.is_cuda:
                sample_weight = sample_weight.cuda()
            per_sample = per_sample * sample_weight

        return per_sample.mean() if self.size_average else per_sample.sum()

class CrossEntropyLossOneHot(nn.Module):
    """Cross entropy for soft / one-hot label distributions."""

    def __init__(self):
        super(CrossEntropyLossOneHot, self).__init__()
        self.log_softmax = nn.LogSoftmax(dim=-1)

    def forward(self, preds, labels):
        """preds: logits; labels: one-hot or soft targets of the same shape."""
        per_sample = torch.sum(-labels * self.log_softmax(preds), -1)
        return torch.mean(per_sample)
