import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from scipy.special import binom
import math


# Reference
# https://github.com/KaiyangZhou/pytorch-center-loss/blob/master/main.py
# https://github.com/4uiiurz1/pytorch-adacos

class AdaCos(nn.Module):
    """Adaptively scaled cosine softmax loss (AdaCos).

    Instead of a fixed scale hyper-parameter, ``s`` is re-estimated from
    the current batch statistics on every training step, following the
    AdaCos paper. ``m`` is stored only for interface compatibility with
    the other margin losses in this file and is never used.
    """

    def __init__(self, num_features, num_classes, m=0.50):
        super(AdaCos, self).__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        # initial scale suggested by the AdaCos paper
        self.s = math.sqrt(2) * math.log(num_classes - 1)
        self.m = m
        self.weight = Parameter(torch.FloatTensor(num_classes, num_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input, label=None):
        # cosine logits between L2-normalized features and class weights
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        if label is None:
            # inference mode: return the raw cosine logits
            return cosine
        # recover angles; clamp keeps acos numerically stable at +/-1
        angles = torch.acos(torch.clamp(cosine, -1.0 + 1e-7, 1.0 - 1e-7))
        target_mask = torch.zeros_like(cosine)
        target_mask.scatter_(1, label.view(-1, 1).long(), 1)
        # adapt the scale from batch statistics; no gradient flows through it
        with torch.no_grad():
            non_target = torch.where(
                target_mask < 1,
                torch.exp(self.s * cosine),
                torch.zeros_like(cosine),
            )
            b_avg = torch.sum(non_target) / input.size(0)
            median_angle = torch.median(angles[target_mask == 1])
            pi_quarter = math.pi / 4 * torch.ones_like(median_angle)
            self.s = torch.log(b_avg) / torch.cos(torch.min(pi_quarter, median_angle))
        return F.cross_entropy(self.s * cosine, label)

    def extra_repr(self):
        return 's={},m={}'.format(self.s, self.m)

class ArcFace(nn.Module):
    """Additive angular margin softmax loss (ArcFace).

    Adds a fixed angular margin ``m`` to the target-class angle before
    re-scaling the cosine logits by ``s`` and applying cross-entropy.
    """

    def __init__(self, num_features, num_classes, s=30.0, m=0.50):
        super(ArcFace, self).__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        self.s = s
        self.m = m
        self.weight = Parameter(torch.FloatTensor(num_classes, num_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input, label=None):
        # cosine similarity between unit-norm features and class weights
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        if label is None:
            # inference mode: raw cosine logits
            return cosine
        # recover angles; clamp keeps acos numerically stable at +/-1
        angles = torch.acos(torch.clamp(cosine, -1.0 + 1e-7, 1.0 - 1e-7))
        # cos(theta + m): margin applied in angle space
        margin_cos = torch.cos(angles + self.m)
        target_mask = torch.zeros_like(cosine)
        target_mask.scatter_(1, label.view(-1, 1).long(), 1)
        # use the margin logit only at the ground-truth class
        blended = cosine * (1 - target_mask) + margin_cos * target_mask
        return F.cross_entropy(self.s * blended, label)

    def extra_repr(self):
        return 's={},m={}'.format(self.s, self.m)


class SphereFace(nn.Module):
    """Multiplicative angular margin softmax loss (SphereFace / A-Softmax).

    Replaces the target-class logit by cos(m * theta) before re-scaling
    by ``s`` and applying cross-entropy.
    """

    def __init__(self, num_features, num_classes, s=30.0, m=1.35):
        super(SphereFace, self).__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        self.s = s
        self.m = m
        self.weight = Parameter(torch.FloatTensor(num_classes, num_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input, label=None):
        # cosine between unit-norm features and unit-norm class weights
        unit_feat = F.normalize(input)
        unit_weight = F.normalize(self.weight)
        cosine = F.linear(unit_feat, unit_weight)
        if label is None:
            # inference mode: raw cosine logits
            return cosine
        # multiply the target-class angle by the margin factor m
        angle = torch.acos(torch.clamp(cosine, -1.0 + 1e-7, 1.0 - 1e-7))
        margin_cos = torch.cos(self.m * angle)
        is_target = torch.zeros_like(cosine).scatter_(1, label.view(-1, 1).long(), 1)
        scaled = (cosine * (1 - is_target) + margin_cos * is_target) * self.s
        return F.cross_entropy(scaled, label)

    def extra_repr(self):
        return 's={},m={}'.format(self.s, self.m)


class CosFace(nn.Module):
    """Additive cosine margin softmax loss (CosFace / AM-Softmax).

    Subtracts a fixed margin ``m`` from the target-class cosine before
    re-scaling by ``s`` and applying cross-entropy.
    """

    def __init__(self, num_features, num_classes, s=30.0, m=0.35):
        super(CosFace, self).__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        self.s = s
        self.m = m
        self.weight = Parameter(torch.FloatTensor(num_classes, num_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, input, label=None):
        # cosine logits between unit-norm features and class weights
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        if label is None:
            # inference mode: behave like a plain cosine classifier head
            return cosine
        # subtract the margin from the target-class cosine only
        penalized = cosine - self.m
        mask = torch.zeros_like(cosine)
        mask.scatter_(1, label.view(-1, 1).long(), 1)
        output = cosine * (1 - mask) + penalized * mask
        return F.cross_entropy(output * self.s, label)

    def extra_repr(self):
        return 's={},m={}'.format(self.s, self.m)


class CenterLoss(nn.Module):
    """Softmax cross-entropy combined with a center-loss term.

    Reference:
    Wen et al. A Discriminative Feature Learning Approach for Deep Face
    Recognition. ECCV 2016.

    Args:
        in_features (int): feature dimension.
        num_classes (int): number of classes.
        lam (float): weight of the center-loss term added to cross-entropy.
        use_gpu (bool): if True, move the class-index tensor to CUDA in forward.
    """

    def __init__(self, in_features=256, num_classes=10, lam=1, use_gpu=True):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.in_features = in_features
        # linear classifier head producing the cross-entropy logits
        self.weight = nn.Parameter(torch.Tensor(num_classes, in_features))
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

        self.feat_dim = in_features
        self.lam = lam
        self.use_gpu = use_gpu
        # one learnable center per class
        self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
        nn.init.kaiming_uniform_(self.centers, a=math.sqrt(5))

    def forward(self, x, labels=None):
        """Return logits when labels is None, else lam * center_loss + CE.

        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size).
        """
        logit = F.linear(x, self.weight)
        if labels is None:
            return logit
        loss1 = F.cross_entropy(logit, labels)

        batch_size = x.size(0)
        # squared Euclidean distance: ||x||^2 + ||c||^2 - 2 * x . c
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        # keyword beta/alpha: the positional addmm_(beta, alpha, ...) form
        # was deprecated and removed from PyTorch
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)

        classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        # mask[i, j] is True where j is sample i's ground-truth class
        mask = labels.eq(classes.expand(batch_size, self.num_classes))

        dist = distmat * mask.float()
        # clamp guards against tiny negative values from float cancellation
        loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size

        return self.lam * loss + loss1


class Arc1Loss(nn.Module):
    """Negative-angle softmax loss with a fixed (buffered) scale ``s``.

    Minimizing cross-entropy over ``-s * theta`` pushes the angle to the
    target class toward zero.
    """

    def __init__(self, num_features, num_classes, s=4.5):
        super().__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        # buffer so s follows .to(device) and is saved in state_dict,
        # but is not trained
        self.register_buffer('s', (torch.Tensor([s])))
        self.weight = Parameter(torch.FloatTensor(num_classes, num_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, inputs, label=None):
        # normalize features
        x = F.normalize(inputs)
        # normalize weights
        W = F.normalize(self.weight)
        # cosine similarity logits
        logits = F.linear(x, W)
        if label is None:
            return logits

        # clamp before acos: float rounding can push the cosine slightly
        # past +/-1, which would make acos return NaN (this matches the
        # clamping used by the other losses in this file)
        angle = torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7).acos()
        output = (- angle)
        return F.cross_entropy(self.s * output, label)

    def extra_repr(self):
        return 's={}'.format(self.s.item())

class Arc2Loss(nn.Module):
    """Negative-angle softmax with a scale adapted from batch angle statistics.

    The scale ``s`` starts as the number 100 and is replaced on each
    training step by ``k / m``, where ``m`` is the gap between the mean
    inter-class and mean intra-class angles of the batch, clamped to
    [0.01, max_m].
    """

    def __init__(self, num_features, num_classes, max_m=.5):
        super().__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        self.max_m = max_m
        self.s = 100
        self.k = 2.10100300
        self.weight = Parameter(torch.FloatTensor(num_classes, num_features))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, inputs, label=None):
        # normalize features
        x = F.normalize(inputs)
        # normalize weights
        W = F.normalize(self.weight)
        # cosine similarity logits
        logits = F.linear(x, W)
        if label is None:
            return logits

        # clamp before acos: float rounding can push the cosine slightly
        # past +/-1, which would make acos return NaN
        angle = torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7).acos()

        with torch.no_grad():
            # keep the index tensor on the same device as the activations
            index = torch.arange(inputs.size(0), device=inputs.device)
            intra = angle[index, label]
            inter = angle.sum(dim=1) - intra
            m_intra = intra.mean()
            m_inter = inter.sum() / (x.size(0) * (self.n_classes - 1))
            m = (m_inter - m_intra).clamp(0.01, self.max_m)
            self.s = self.k / m

        return F.cross_entropy(- angle * self.s, label)

    def extra_repr(self):
        # s is a plain number before the first forward pass and a 0-dim
        # tensor afterwards; handle both so repr(model) never crashes
        s = self.s.item() if torch.is_tensor(self.s) else self.s
        return 's={}'.format(s)


class ArcLoss(nn.Module):
    """Negative-angle softmax loss with a *learnable* scale ``s``."""

    def __init__(self, num_features, num_classes, s=4.5):
        super().__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        self.weight = Parameter(torch.FloatTensor(num_classes, num_features))
        # the scale is trained jointly with the class weights
        self.register_parameter('s', Parameter(torch.Tensor([s])))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, inputs, label=None):
        # cosine similarity between unit-norm features and class weights
        cosine = F.linear(F.normalize(inputs), F.normalize(self.weight))
        if label is None:
            # inference mode: raw cosine logits
            return cosine
        # clamp keeps acos numerically stable at +/-1
        angle = torch.acos(torch.clamp(cosine, -1.0 + 1e-7, 1.0 - 1e-7))
        return F.cross_entropy(-self.s * angle, label)

    def extra_repr(self):
        return 's={}'.format(self.s.item())

class SoftMarginLoss(nn.Module):
    """Linear classifier trained with the multi-class margin (hinge) loss."""

    def __init__(self, in_features, out_features, ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.reset_parameters()
        # NOTE: the original code constructed nn.MultiLabelSoftMarginLoss()
        # here and discarded it; that dead statement has been removed.

    def reset_parameters(self):
        # same fan-in based init nn.Linear uses for its weight
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, input, label=None):
        """Return raw logits when label is None, else the margin loss."""
        x = F.linear(input, self.weight)
        if label is None:
            return x
        # multi-class classification hinge loss (margin-based loss)
        return F.multi_margin_loss(x, label)


class SoftLoss(nn.Module):
    """Plain linear classifier trained with standard cross-entropy."""

    def __init__(self, in_features, out_features, ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        self.reset_parameters()

    def reset_parameters(self):
        # same fan-in based init nn.Linear uses for its weight
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, input, label=None):
        # inference: raw logits; training: cross-entropy loss
        logits = F.linear(input, self.weight)
        return logits if label is None else F.cross_entropy(logits, label)
from torch.autograd import Variable
class LSoftmaxLinear(nn.Module):
    """Large-margin softmax (L-Softmax) linear layer.

    In training mode the target-class logit is replaced by
    ``||W_y|| * ||x|| * psi(theta)`` where ``psi`` enforces an angular
    margin of ``m`` (Liu et al., ICML 2016); in eval mode it is a plain
    linear projection. With ``m=1`` it reduces to ordinary softmax.
    """

    def __init__(self, input_dim, output_dim, m=1):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.margin = m

        self.weight = nn.Parameter(torch.FloatTensor(input_dim, output_dim))

        # cos(m * theta) expansion: sum_i signs[i] * coeffs[i]
        #   * cos(theta)^cos_exps[i] * sin^2(theta)^sin_sq_exps[i]
        self.divisor = math.pi / self.margin
        self.coeffs = list(binom(self.margin, range(0, self.margin + 1, 2)))
        self.cos_exps = list(range(self.margin, -1, -2))
        self.sin_sq_exps = list(range(len(self.cos_exps)))
        self.signs = [1]
        for i in range(1, len(self.sin_sq_exps)):
            self.signs.append(self.signs[-1] * -1)
        self.reset_parameters()

    def reset_parameters(self):
        # kaiming_normal_ replaces the long-deprecated kaiming_normal
        nn.init.kaiming_normal_(self.weight.data.t())

    def find_k(self, cos):
        """Return k such that theta falls in [k*pi/m, (k+1)*pi/m)."""
        acos = cos.acos()
        k = (acos / self.divisor).floor().detach()
        return k

    def forward(self, input, target=None):
        if self.training:
            assert target is not None
            logit = input.matmul(self.weight)
            batch_size = logit.size(0)
            logit_target = logit[range(batch_size), target]
            weight_target_norm = self.weight[:, target].norm(p=2, dim=0)
            input_norm = input.norm(p=2, dim=1)
            # norm_target_prod: (batch_size,)
            norm_target_prod = weight_target_norm * input_norm
            # cos_target: (batch_size,); epsilon guards zero norms
            cos_target = logit_target / (norm_target_prod + 1e-10)
            sin_sq_target = 1 - cos_target**2

            # coeffs, cos_exps, sin_sq_exps, signs: (num_ns,) tensors on
            # the input's device/dtype (new_tensor replaces the deprecated
            # Variable(input.data.new(...)) idiom)
            coeffs = input.new_tensor(self.coeffs)
            cos_exps = input.new_tensor(self.cos_exps)
            sin_sq_exps = input.new_tensor(self.sin_sq_exps)
            signs = input.new_tensor(self.signs)

            cos_terms = cos_target.unsqueeze(1) ** cos_exps.unsqueeze(0)
            sin_sq_terms = (sin_sq_target.unsqueeze(1)
                            ** sin_sq_exps.unsqueeze(0))

            cosm_terms = (signs.unsqueeze(0) * coeffs.unsqueeze(0)
                          * cos_terms * sin_sq_terms)
            cosm = cosm_terms.sum(1)
            k = self.find_k(cos_target)

            # psi(theta) = (-1)^k * cos(m*theta) - 2k keeps psi monotonic
            ls_target = norm_target_prod * (((-1)**k * cosm) - 2*k)
            logit[range(batch_size), target] = ls_target
            return F.cross_entropy(logit, target)
        else:
            assert target is None
            return input.matmul(self.weight)


def get_loss(loss_func, hidden, class_number, args):
    """Factory mapping a loss name to a configured criterion module.

    Args:
        loss_func: one of the registered loss names below.
        hidden: feature dimension fed into the criterion.
        class_number: number of target classes.
        args: namespace providing the ``s`` and/or ``m`` hyper-parameters;
            only the attributes the chosen loss needs are read.

    Returns:
        An nn.Module criterion taking (features, labels).

    Raises:
        ValueError: if ``loss_func`` is not a known loss name.
    """
    if loss_func == 'soft':
        criterion = SoftLoss(hidden, class_number)
    elif loss_func == 'hinge':
        criterion = SoftMarginLoss(hidden, class_number)
    elif loss_func == 'arc':
        criterion = ArcLoss(hidden, class_number, s=args.s)
    elif loss_func == 'arc1':
        criterion = Arc1Loss(hidden, class_number, s=args.s)
    elif loss_func == 'arc2':
        criterion = Arc2Loss(hidden, class_number, args.m)
    elif loss_func == 'sphere':
        criterion = SphereFace(hidden, class_number, s=args.s, m=args.m)
    elif loss_func == 'arcface':
        criterion = ArcFace(hidden, class_number, s=args.s, m=args.m)
    elif loss_func == 'adacos':
        criterion = AdaCos(hidden, class_number, m=args.m)
    elif loss_func == 'cosface':
        criterion = CosFace(hidden, class_number, s=args.s, m=args.m)
    elif loss_func == 'center':
        criterion = CenterLoss(hidden, class_number, args.m, False)
    elif loss_func == 'lsoft':
        criterion = LSoftmaxLinear(hidden, class_number, m=args.m)
    else:
        # raising a plain string is a TypeError in Python 3; raise a
        # real exception type instead
        raise ValueError('unknown loss function: {}'.format(loss_func))
    return criterion