import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
# import torchvision as tv
# import torchvision.transforms as transforms
from args import *
from wheel import *



class A_Softmax(nn.Module):
    """A-Softmax (SphereFace) classification layer.

    Produces the pair (||x|| * cos(theta), ||x|| * phi(theta)) used by
    AngularSoftmaxLoss, where theta is the angle between a feature and a
    class weight vector.
    ref:
    - ICML 2016 | Large-Margin Softmax Loss for Convolutional Neural Networks
    - CVPR 2017 | SphereFace: Deep Hypersphere Embedding for Face Recognition
    - https://github.com/Joyako/SphereFace-pytorch/blob/master/module/sphere_face.py#L61
    - https://github.com/clcarwin/sphereface_pytorch/blob/master/net_sphere.py#L13
    """

    def __init__(self, in_features, out_features, margin=4):
        super(A_Softmax, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.margin = margin
        # one weight row per class; renorm + rescale initialization
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)

    def cos_mx(self, x):
        """cos(m*x) written as a polynomial in cos(x) (Chebyshev identity)."""
        table = {
            0: lambda c: torch.ones_like(c).to(c.device).to(c.dtype),  # c ** 0
            1: lambda c: c,                                            # c ** 1
            2: lambda c: 2 * c ** 2 - 1,
            3: lambda c: 4 * c ** 3 - 3 * c,
            4: lambda c: 8 * c ** 4 - 8 * c ** 2 + 1,
            5: lambda c: 16 * c ** 5 - 20 * c ** 3 + 5 * c,
        }
        if self.margin not in table:
            raise Exception("A_Softmax: cos_mx: margin out of bound: {}".format(self.margin))
        return table[self.margin](x)

    def forward(self, x):
        """input:
        - x: [n, d]
        output:
        - ||x|| * cos(theta), [n, c]
        - ||x|| * phi(theta), [n, c]
        """
        w = self.weight.renorm(2, 1, 1e-5).mul(1e5)  # [c, d]
        w_norm = w.norm(2, 1, keepdim=True)          # [c, 1]
        x_norm = x.norm(2, 1, keepdim=True)          # [n, 1]
        # dot(x, W^T) = ||x|| * ||W|| * cos(theta); clamp guards acos domain
        cos_theta = (x.mm(w.T) / x_norm / w_norm.T).clamp(-1, 1)
        cos_m_theta = self.cos_mx(cos_theta)
        # k selects the monotonic segment: k*pi/m <= theta <= (k+1)*pi/m
        theta = cos_theta.acos().detach()
        k = (self.margin * theta / math.pi).floor()
        # phi(theta) = (-1)^k * cos(m*theta) - 2k: monotone decreasing in theta
        phi_theta = (-1.) ** k * cos_m_theta - 2 * k

        return x_norm * cos_theta, x_norm * phi_theta


class ScaledCos(nn.Module):
    """Cosine-similarity linear layer: y = s * W^T x / ||W|| / ||x||.

    Drop-in replacement for nn.Linear; `scale` may optionally be learned.
    ref: https://github.com/happynear/NormFace/blob/master/prototxt/scaled_cosine_softmax.prototxt#L35
    """

    def __init__(self, in_features, out_features, scale=20, train_scale=False):
        super(ScaledCos, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # learned scalar when train_scale, otherwise a plain constant
        self.scale = Parameter(torch.as_tensor(scale).float()) if train_scale else scale
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)

    def forward(self, x):
        # cos() comes from wheel — presumably pairwise cosine similarity [n, c]
        return self.scale * cos(x, self.weight)


class NormEuclidean(nn.Module):
    """Euclidean distance between L2-normalized features and weights:
    y = [(W / ||W||) - (x / ||x||)]^2, used as a Linear-style layer.
    ref: https://github.com/happynear/NormFace/blob/master/prototxt/scaled_cosine_softmax.prototxt#L35
    """

    def __init__(self, in_features, out_features):
        super(NormEuclidean, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)

    def forward(self, x):
        # project both features and class weights onto the unit hypersphere
        unit_x = F.normalize(x, p=2, dim=1)
        unit_w = F.normalize(self.weight, p=2, dim=1)
        # euclidean() comes from wheel — pairwise distances, [n, c]
        return euclidean(unit_x, unit_w)
 
 
class LeNetPP(nn.Module):
    """LeNet++ backbone for [n, 1, 28, 28] inputs, with a classifier head
    chosen by args.model_type.
    https://github.com/ydwen/caffe-face/blob/caffe-face/mnist_example/mnist_train_test.prototxt
    """

    @staticmethod
    def _conv_block(c_in, c_out, pool_kernel):
        """Two 5x5 conv + BN + PReLU stages followed by a stride-2 max pool."""
        return nn.Sequential(
            nn.Conv2d(c_in, c_out, 5, 1, 2),
            nn.BatchNorm2d(c_out),
            nn.PReLU(),
            nn.Conv2d(c_out, c_out, 5, 1, 2),
            nn.BatchNorm2d(c_out),
            nn.PReLU(),
            nn.MaxPool2d(pool_kernel, 2)
        )

    def __init__(self):
        """in: [n, 1, 28, 28]"""
        super(LeNetPP, self).__init__()
        self.layer_1 = self._conv_block(1, 32, 2)    # 28 -> 14
        self.layer_2 = self._conv_block(32, 64, 2)   # 14 -> 7
        self.layer_3 = self._conv_block(64, 128, 3)  # 7 -> 3
        self.fea_layer = nn.Sequential(
            nn.Linear(1152, args.dim_fea),  # 128 * 3 * 3 = 1152
            nn.PReLU()
        )
        # classifier head selected by the training objective
        if args.model_type in ("naive", "centre"):
            self.clf_layer = nn.Linear(args.dim_fea, args.n_class, args.bias != 0)
        elif args.model_type == "a-softmax":
            self.clf_layer = A_Softmax(args.dim_fea, args.n_class, args.margin)
        elif args.model_type == "normface":
            self.clf_layer = ScaledCos(args.dim_fea, args.n_class, args.scale, args.train_scale)
        elif args.model_type == "am-softmax":
            # AM-Softmax applies scale & margin inside the loss: plain cosine here
            self.clf_layer = ScaledCos(args.dim_fea, args.n_class, 1, False)
        elif args.model_type in ("c-contrastive", "c-triplet"):
            self.clf_layer = NormEuclidean(args.dim_fea, args.n_class)
        else:
            # fix: previously fell through silently, leaving clf_layer unset
            # and crashing later with AttributeError in forward()
            raise ValueError("LeNetPP: unknown model_type: {}".format(args.model_type))

    def forward(self, x, norm_fea=False):
        """x: [n, 1, 28, 28] -> (feature [n, dim_fea], logit from clf_layer)"""
        x = self.layer_1(x)
        x = self.layer_2(x)
        x = self.layer_3(x)
        x = x.view(x.size(0), -1)  # [n, 1152]
        feature = self.fea_layer(x)
        if norm_fea:
            feature = F.normalize(feature, 2, 1)
        logit = self.clf_layer(feature)

        return feature, logit


class AngularSoftmaxLoss(nn.Module):
    """A-Softmax loss (criterion paired with the A_Softmax layer).
    ref:
    - ICML 2016 | Large-Margin Softmax Loss for Convolutional Neural Networks
    - CVPR 2017 | SphereFace: Deep Hypersphere Embedding for Face Recognition
    - https://github.com/Joyako/SphereFace-pytorch/blob/master/module/loss.py#L11
    - https://github.com/clcarwin/sphereface_pytorch/blob/master/net_sphere.py#L60
    """

    def __init__(self, lambda_min=5., lambda_max=1500., gamma=0.000025, power=35):
        super(AngularSoftmaxLoss, self).__init__()
        # gamma doubles as the focal-loss exponent below; with the default
        # 2.5e-5 the focal factor is ~1, i.e. near-plain cross-entropy
        self.gamma = gamma
        self.power = power
        self.lambda_min = lambda_min
        self.lambda_max = lambda_max
        self.Lambda = lambda_max
        self.it = 0  # iteration counter driving the lambda annealing

    def forward(self, logit, label):
        """logit = (cos_theta, phi_theta): both [n, c], output of A-Softmax layer
        label: [n], sparse class ID (long)
        lambda := max { lambda_min, lambda_max / (1 + gamma * #iter)^power }
        """
        cos_theta, phi_theta = logit
        self.it += 1
        # NOTE(review): annealing uses a hard-coded 0.1 rate rather than the
        # gamma/power schedule from the docstring — kept as the author left it
        self.Lambda = max(
            self.lambda_min, self.lambda_max / (1 + 0.1 * self.it))
        idx_phi = F.one_hot(label, cos_theta.size(1)).bool()  # one-hot mask [n, c]
        # f = [lambda * ||x|| * cos(theta) + ||x|| * phi(theta)] / (1 + lambda)
        # where ||x|| is already been multiplied in A-Softmax layer
        # fix: clone so the in-place updates below do not mutate the caller's
        # cos_theta tensor (original aliased it after removing the `* 1.0`)
        f = cos_theta.clone()
        f[idx_phi] -= cos_theta[idx_phi] / (1.0 + self.Lambda)
        f[idx_phi] += phi_theta[idx_phi] / (1.0 + self.Lambda)
        # log-probability of the ground-truth class per sample, [n]
        log_prob = F.log_softmax(f, -1).gather(1, label.view(-1, 1)).view(-1)

        # focal-style weighting: (1 - p_t)^gamma * (-log p_t)
        pt = log_prob.exp()
        loss = -1 * (1 - pt) ** self.gamma * log_prob
        loss = loss.mean()
        return loss


def centre_loss(X, L, C):
    """Centre loss: half the mean squared distance of each feature to the
    centre of its own class.

    X: [n, d], features
    L: [n], labels of features
    C: [c, d], centres, row i is the centre of class i
    """
    deltas = X - C[L.long()]  # per-sample offset from its class centre
    return 0.5 * deltas.pow(2).sum(1).mean()


def C_contrastive_loss(D, L,
                  margin_pos=0, margin_neg=1,
                  weight_pos=1, weight_neg=1,
                  pos_outlier_threshold=4):
    """C-contrastive loss from NormFace
    D: [n, c], (norm-euclidean) distance between features & weight vectors,
                output of NormEuclidean
    L: [n], class ID (integer)
    NOTE: pos_outlier_threshold is currently unused (the upper clamp was
    disabled); kept for interface compatibility.
    ref:
    - https://github.com/happynear/caffe-windows/blob/ms/src/caffe/layers/general_constrastive_loss_layer.cpp#L52
    - https://github.com/happynear/caffe-windows/blob/ms/src/caffe/proto/caffe.proto#L1738
    """
    # hinge terms: positives penalized beyond margin_pos,
    # negatives penalized when closer than margin_neg
    loss_pos = (D - margin_pos).clamp(min=0)
    loss_neg = (margin_neg - D).clamp(min=0)

    _n_class = D.size(1)
    # torch builtin instead of the wheel one_hot helper
    mask_pos = F.one_hot(L.long(), _n_class).float()  # [n, c]
    loss_pos = weight_pos * mask_pos * loss_pos
    # only the hardest (closest) negative class per sample contributes
    loss_neg = weight_neg * ((1 - mask_pos) * loss_neg).max(1)[0]
    loss = (loss_pos.sum(1) + loss_neg).mean()
    return loss


def C_triplet_loss(D, L, margin=1):
    """C-triplet loss from NormFace
    D: [n, c], (norm-euclidean) distance between features & weight vectors,
                output of NormEuclidean
    L: [n], class ID
    ref:
    - https://github.com/happynear/caffe-windows/blob/ms/src/caffe/layers/general_triplet_loss_layer.cpp#L38
    - https://github.com/happynear/caffe-windows/blob/ms/src/caffe/proto/caffe.proto#L1772
    """
    D_pos = D.unsqueeze(2)  # [n, c, 1]
    D_neg = D.unsqueeze(1)  # [n, 1, c]
    # kernel[s, i, j] = D[s, i] + margin - D[s, j]
    kernel = D_pos + margin - D_neg

    _n_class = D.size(1)  # fix: was W.size(0) with W undefined (NameError)
    # triplet_mask comes from wheel — presumably selects valid (pos, neg) pairs
    mask_triplet = triplet_mask(L, torch.arange(_n_class).to(L.device), sparse=True)  # [n, c, c]
    loss_triplet = 0.5 * mask_triplet * kernel
    # hinge at zero; fix: original built zeros on X.device with X undefined
    loss_triplet = loss_triplet.clamp(min=0)

    # average over active (positive-loss) triplets only
    n_pos = (loss_triplet > 1e-16).float().sum()
    return loss_triplet.sum() / (n_pos + 1e-16)


def am_softmax_loss(cos_theta, label, scale=30, margin=0.35):
    """AM-Softmax & cross entropy loss
    cos_theta: [n, c], output of ScaledCos
    label: [n], class ID (integer)
    Subtracts `margin` from the ground-truth cosine, scales, then applies
    standard cross-entropy.
    """
    _n_class = cos_theta.size(1)
    # torch builtin instead of the wheel one_hot helper
    target_mask = F.one_hot(label.long(), _n_class).bool()
    # margin only on the ground-truth entry of each row
    logit = scale * torch.where(target_mask, cos_theta - margin, cos_theta)
    return F.cross_entropy(logit, label)

