import warnings
import torch
import random
import itertools as it
import numpy as np
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import math

warnings.filterwarnings("ignore")
"""================================================================================================="""


# LOSS SELECTION FUNCTION #####################
def loss_select(loss, opt, to_optim):
    """
    Selection function which returns the respective criterion while appending to list of trainable parameters if required.

    Args:
        loss:     str, name of loss function to return.
        opt:      argparse.Namespace, contains all training-specific parameters.
        to_optim: list of trainable parameters. Is extended if the loss function contains trainable parameters as well.
    Returns:
        criterion (torch.nn.Module inherited), to_optim (optionally appended)
    Raises:
        Exception: if `loss` does not name an available criterion.
    """
    if loss == 'triplet':
        criterion = TripletLoss(margin=opt.margin, sampling_method=opt.sampling_method)
    elif loss == 'npair':
        criterion = NPairLoss(l2=opt.l2npair)
    elif loss == 'marginloss':
        criterion = MarginLoss(margin=opt.margin, nu=opt.nu, beta=opt.beta,
                               n_classes=opt.num_classes, sampling_method=opt.sampling_method)
        # Per-class betas are trainable and get their own learning rate.
        to_optim += [{'params': criterion.parameters(), 'lr': opt.beta_lr, 'weight_decay': 0}]
    elif loss == 'proxynca':
        # NOTE: the original ternary `opt.embed_dim if 'num_cluster' in vars(opt) else opt.embed_dim`
        # had identical branches; simplified to a plain opt.embed_dim.
        criterion = ProxyNCALoss(num_proxies=opt.num_classes, embedding_dim=opt.embed_dim)
        to_optim += [{'params': criterion.parameters(), 'lr': opt.proxy_lr}]
    elif loss == 'crossentropy':
        criterion = CEClassLoss(n_classes=opt.num_classes, inp_dim=opt.embed_dim)
        to_optim += [{'params': criterion.parameters(), 'lr': opt.lr, 'weight_decay': 0}]
    elif loss == 'pal':
        # Trainable proxies (one per class) are optimized with their own learning rate.
        criterion = ProxyAnchorLoss(nb_classes=opt.num_classes, sz_embed=opt.embed_dim,
                                    mrg=opt.pal_margin, alpha=opt.pal_alpha)
        to_optim += [{'params': criterion.parameters(), 'lr': opt.pal_lr}]
    elif loss == 'ms':
        criterion = MultiSimilarityLoss(margin=opt.ms_margin, thresh=opt.ms_thresh)
    elif loss == 'stl':
        # Trainable class centers (K per class) join the optimizer at the base lr.
        criterion = SoftTripleLoss(la=opt.stl_la, gamma=opt.stl_gamma, tau=opt.stl_tau,
                                   margin=opt.stl_margin, K=opt.stl_K, dim=opt.embed_dim,
                                   cN=opt.num_classes)
        to_optim += [{'params': criterion.parameters(), 'lr': opt.lr}]
    else:
        raise Exception('Loss {} not available!'.format(loss))

    return criterion, to_optim


"""================================================================================================="""


class TupleSampler():
    """
    Container for all sampling methods that can be used in conjunction with the respective loss functions.
    Based on batch-wise sampling, i.e. given a batch of training data, sample useful data tuples that are
    used to train the network more efficiently.
    """
    def __init__(self, method='random'):
        """
        Args:
            method: str, name of sampling method to use. One of
                    'semihard', 'softhard', 'distance', 'npair', 'random'.
        Raises:
            ValueError: if an unknown sampling method is requested.
                        (Previously an unknown method silently left `self.give`
                        unset, failing later with an AttributeError.)
        """
        self.method = method
        # BUGFIX: the original mixed a standalone `if` with an `elif` chain,
        # so an invalid method fell through without any dispatch being set.
        if method == 'semihard':
            self.give = self.semihardsampling
        elif method == 'softhard':
            self.give = self.softhardsampling
        elif method == 'distance':
            self.give = self.distanceweightedsampling
        elif method == 'npair':
            self.give = self.npairsampling
        elif method == 'random':
            self.give = self.randomsampling
        else:
            raise ValueError('Sampling method {} not available!'.format(method))

    def randomsampling(self, batch, labels):
        """
        This method finds all available triplets in a batch given by the classes provided in labels, and randomly
        selects <len(batch)> triplets.

        Args:
            batch:  np.ndarray or torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor):
            labels = labels.detach().cpu().numpy()
        unique_classes = np.unique(labels)
        indices = np.arange(len(batch))
        class_dict = {i: indices[labels == i] for i in unique_classes}

        # Enumerate all (anchor_class, anchor_class, negative_class) combinations ...
        sampled_triplets = [list(it.product([x], [x], [y for y in unique_classes if x != y])) for x in unique_classes]
        sampled_triplets = [x for y in sampled_triplets for x in y]

        # ... then expand each class combination into concrete index triplets (anchor != positive).
        sampled_triplets = [[x for x in list(it.product(*[class_dict[j] for j in i])) if x[0] != x[1]]
                            for i in sampled_triplets]
        sampled_triplets = [x for y in sampled_triplets for x in y]

        # NOTE: The number of possible triplets is given by #unique_classes*(2*(samples_per_class-1)!)*(#unique_classes-1)*samples_per_class
        sampled_triplets = random.sample(sampled_triplets, batch.shape[0])
        return sampled_triplets

    def semihardsampling(self, batch, labels, margin=0.2):
        """
        For each anchor, sample a random positive and a semi-hard negative, i.e. a negative
        that is farther than the positive but still within `margin` of it. Falls back to a
        random negative when no semi-hard one exists.

        Args:
            batch:  torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
            margin: float, width of the semi-hard band beyond the positive distance.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor):
            labels = labels.detach().cpu().numpy()
        bs = batch.size(0)
        # Return distance matrix for all elements in batch (BSxBS)
        distances = self.pdist(batch.detach()).detach().cpu().numpy()

        positives, negatives = [], []
        anchors = []
        for i in range(bs):
            l, d = labels[i], distances[i]
            neg = labels != l
            pos = labels == l

            anchors.append(i)
            # Exclude the anchor itself from the positive pool.
            pos[i] = False
            p = np.random.choice(np.where(pos)[0])
            positives.append(p)

            # Find negatives that violate the triplet constraint semi-hardly:
            # d(a,p) < d(a,n) < d(a,p) + margin
            neg_mask = np.logical_and(neg, d > d[p])
            neg_mask = np.logical_and(neg_mask, d < margin + d[p])
            if neg_mask.sum() > 0:
                negatives.append(np.random.choice(np.where(neg_mask)[0]))
            else:
                negatives.append(np.random.choice(np.where(neg)[0]))

        sampled_triplets = [[a, p, n] for a, p, n in zip(anchors, positives, negatives)]
        return sampled_triplets

    def softhardsampling(self, batch, labels):
        """
        This method finds all available triplets in a batch given by the classes provided in labels, and selects
        triplets based on semihard sampling introduced in 'https://arxiv.org/pdf/1503.03832.pdf'.

        Args:
            batch:  torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor):
            labels = labels.detach().cpu().numpy()
        bs = batch.size(0)
        # Return distance matrix for all elements in batch (BSxBS)
        distances = self.pdist(batch.detach()).detach().cpu().numpy()

        positives, negatives = [], []
        anchors = []
        for i in range(bs):
            l, d = labels[i], distances[i]
            anchors.append(i)
            # 1 for batch elements with label l
            neg = labels != l
            pos = labels == l
            # 0 for current anchor
            pos[i] = False

            # Find negatives that violate the triplet constraint (closer than the farthest positive).
            neg_mask = np.logical_and(neg, d < d[np.where(pos)[0]].max())
            # Find positives that violate the triplet constraint (farther than the closest negative).
            pos_mask = np.logical_and(pos, d > d[np.where(neg)[0]].min())

            if pos_mask.sum() > 0:
                positives.append(np.random.choice(np.where(pos_mask)[0]))
            else:
                positives.append(np.random.choice(np.where(pos)[0]))

            if neg_mask.sum() > 0:
                negatives.append(np.random.choice(np.where(neg_mask)[0]))
            else:
                negatives.append(np.random.choice(np.where(neg)[0]))

        sampled_triplets = [[a, p, n] for a, p, n in zip(anchors, positives, negatives)]
        return sampled_triplets

    def distanceweightedsampling(self, batch, labels, lower_cutoff=0.5, upper_cutoff=1.4):
        """
        This method finds all available triplets in a batch given by the classes provided in labels, and selects
        triplets based on distance sampling introduced in 'Sampling Matters in Deep Embedding Learning'.

        Args:
            batch:  torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
            lower_cutoff: float, lower cutoff value for negatives that are too close to anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
            upper_cutoff: float, upper cutoff value for positives that are too far away from the anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor):
            labels = labels.detach().cpu().numpy()
        bs = batch.shape[0]

        distances = self.pdist(batch.detach()).clamp(min=lower_cutoff)

        positives, negatives = [], []

        for i in range(bs):
            pos = labels == labels[i]
            q_d_inv = self.inverse_sphere_distances(batch, distances[i], labels, labels[i])
            # Sample positives randomly
            pos[i] = 0
            positives.append(np.random.choice(np.where(pos)[0]))
            # Sample negatives by distance
            negatives.append(np.random.choice(bs, p=q_d_inv))

        sampled_triplets = [[a, p, n] for a, p, n in zip(list(range(bs)), positives, negatives)]
        return sampled_triplets

    def npairsampling(self, batch, labels):
        """
        This method finds N-Pairs in a batch given by the classes provided in labels in the
        creation fashion proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'.

        Args:
            batch:  np.ndarray or torch.Tensor, batch-wise embedded training samples.
            labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
        Returns:
            list of sampled data tuples containing reference indices to the position IN THE BATCH.
        """
        if isinstance(labels, torch.Tensor):
            labels = labels.detach().cpu().numpy()

        # Only classes with at least two samples can supply an (anchor, positive) pair.
        label_set, count = np.unique(labels, return_counts=True)
        label_set = label_set[count >= 2]
        pos_pairs = np.array([np.random.choice(np.where(labels == x)[0], 2, replace=False) for x in label_set])
        neg_tuples = []

        # The negatives for each pair are the positives of all other classes.
        for idx in range(len(pos_pairs)):
            neg_tuples.append(pos_pairs[np.delete(np.arange(len(pos_pairs)), idx), 1])

        neg_tuples = np.array(neg_tuples)

        sampled_npairs = [[a, p, *list(neg)] for (a, p), neg in zip(pos_pairs, neg_tuples)]
        return sampled_npairs

    def pdist(self, A):
        """
        Efficient function to compute the pairwise Euclidean distance matrix for a matrix A.

        Args:
            A: torch.Tensor [(N x d)], matrix for which the distance matrix is to be computed.
        Returns:
            torch.Tensor [(N x N)], distance matrix, clamped to be non-negative before the sqrt.
        """
        prod = torch.mm(A, A.t())
        norm = prod.diag().unsqueeze(1).expand_as(prod)
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b; clamp guards against tiny negative values.
        res = (norm + norm.t() - 2 * prod).clamp(min=0)
        return res.sqrt()

    def inverse_sphere_distances(self, batch, dist, labels, anchor_label):
        """
        Function to utilise the distances of batch samples to compute their
        probability of occurence, and using the inverse to sample actual negatives to the resp. anchor.

        Args:
            batch:        torch.Tensor(), batch for which the sampling probabilities w.r.t to the anchor are computed. Used only to extract the shape.
            dist:         torch.Tensor(), computed distances between anchor to all batch samples.
            labels:       np.ndarray, labels for each sample for which distances were computed in dist.
            anchor_label: float, anchor label
        Returns:
            np.ndarray, normalized inverse-distance sampling probabilities over the batch.
        """
        _, dim = len(dist), batch.shape[-1]
        # negated log-distribution of distances of unit sphere in dimension <dim>
        log_q_d_inv = ((2.0 - float(dim)) * torch.log(dist) - (float(dim - 3) / 2) * torch.log(1.0 - 0.25 *
                                                                                               (dist.pow(2))))
        # Set sampling probabilities of positives to zero
        log_q_d_inv[np.where(labels == anchor_label)[0]] = 0

        q_d_inv = torch.exp(log_q_d_inv - torch.max(log_q_d_inv))  # - max(log) for stability
        # Set sampling probabilities of positives to zero
        q_d_inv[np.where(labels == anchor_label)[0]] = 0

        # NOTE: Cutting of values with high distances made the results slightly worse.
        # q_d_inv[np.where(dist>upper_cutoff)[0]]    = 0

        # Normalize inverted distance for probability distr.
        q_d_inv = q_d_inv / q_d_inv.sum()
        return q_d_inv.detach().cpu().numpy()

"""================================================================================================="""


class TripletLoss(torch.nn.Module):
    def __init__(self, margin=1, sampling_method='random'):
        """
        Basic Triplet Loss as proposed in 'FaceNet: A Unified Embedding for Face Recognition and Clustering'.

        Args:
            margin:             float, triplet margin keeping positives from collapsing onto the
                                anchor and negatives from drifting arbitrarily far away.
            sampling_method:    str, triplet sampling strategy handed to the TupleSampler class.
        """
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.sampler = TupleSampler(method=sampling_method)

    def triplet_distance(self, anchor, positive, negative):
        """
        Hinge loss for one triplet: relu(d(a,p)^2 - d(a,n)^2 + margin).

        Args:
            anchor, positive, negative: torch.Tensor(), embeddings of the triplet members.
        Returns:
            torch.Tensor(), scalar triplet loss.
        """
        dist_pos = (anchor - positive).pow(2).sum()
        dist_neg = (anchor - negative).pow(2).sum()
        return torch.nn.functional.relu(dist_pos - dist_neg + self.margin)

    def forward(self, batch, labels):
        """
        Args:
            batch:   torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels:  np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
        Returns:
            triplet loss (torch.Tensor(), batch-averaged)
        """
        # Draw training triplets from the current batch.
        triplets = self.sampler.give(batch, labels)
        # Average the per-triplet hinge losses.
        per_triplet = [self.triplet_distance(batch[t[0], :], batch[t[1], :], batch[t[2], :])
                       for t in triplets]
        return torch.mean(torch.stack(per_triplet))


"""================================================================================================="""


# Standard N-Pair Loss.
class NPairLoss(torch.nn.Module):
    def __init__(self, l2=0.02):
        """
        Basic N-Pair Loss as proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'

        Args:
            l2: float, weight of the embedding-norm penalty (embeddings are not normalized for this loss).
        Returns:
            Nothing!
        """
        super(NPairLoss, self).__init__()
        self.sampler = TupleSampler(method='npair')
        self.l2 = l2

    def npair_distance(self, anchor, positive, negatives):
        """
        Log-sum-exp form of the N-Pair loss for a single anchor/positive and its negatives.

        Args:
            anchor, positive: torch.Tensor() [(1 x d)], embeddings of the pair.
            negatives:        torch.Tensor() [(k x d)], negative embeddings.
        Returns:
            n-pair loss (torch.Tensor())
        """
        logits = anchor.mm((negatives - positive).transpose(0, 1))
        return torch.log(1 + torch.exp(logits).sum())

    def weightsum(self, anchor, positive):
        """
        Squared-norm penalty over anchor and positive.
        NOTE: Only anchor and positive need penalizing since the negatives are built from them.

        Args:
            anchor, positive: torch.Tensor(), embeddings of the pair.
        Returns:
            torch.Tensor(), weight penalty.
        """
        return torch.sum(anchor ** 2 + positive ** 2)

    def forward(self, batch, labels):
        """
        Args:
            batch:   torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels:  np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
        Returns:
            n-pair loss (torch.Tensor(), batch-averaged)
        """
        # Draw one N-pair tuple per eligible class.
        sampled_npairs = self.sampler.give(batch, labels)

        pair_losses, penalties = [], []
        for npair in sampled_npairs:
            a_idx, p_idx, neg_idx = npair[0], npair[1], npair[2:]
            pair_losses.append(
                self.npair_distance(batch[a_idx:a_idx + 1, :], batch[p_idx:p_idx + 1, :], batch[neg_idx, :]))
            penalties.append(self.weightsum(batch[a_idx, :], batch[p_idx, :]))

        # Base loss plus averaged weight penalty.
        loss = torch.stack(pair_losses) + self.l2 * torch.mean(torch.stack(penalties))
        return torch.mean(loss)


"""================================================================================================="""


# MarginLoss with trainable class separation margin beta. Runs on Mini-batches as well.
class MarginLoss(torch.nn.Module):
    def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'):
        """
        Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'.

        Args:
            margin:          float, fixed triplet margin (see also TripletLoss).
            nu:              float, regularisation weight for beta. Zero by default (in literature as well).
            beta:            float, initial value for trainable class margins. Set to default literature value.
            n_classes:       int, number of target class. Required because it dictates the number of trainable class margins.
            beta_constant:   bool, set to True if betas should not be trained.
            sampling_method: str, sampling method to use to generate training triplets.
        Returns:
            Nothing!
        """
        super(MarginLoss, self).__init__()
        self.margin = margin
        self.n_classes = n_classes
        self.beta_constant = beta_constant

        self.beta_val = beta
        # Either one scalar beta for all classes, or one trainable beta per class.
        self.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes) * beta)

        self.nu = nu

        self.sampling_method = sampling_method
        self.sampler = TupleSampler(method=sampling_method)

    def forward(self, batch, labels):
        """
        Args:
            batch:   torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels:  np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
        Returns:
            margin loss (torch.Tensor(), batch-averaged)
        """
        if isinstance(labels, torch.Tensor):
            labels = labels.detach().cpu().numpy()

        sampled_triplets = self.sampler.give(batch, labels)

        # Anchor-positive and anchor-negative distances; small eps keeps sqrt differentiable at 0.
        d_ap, d_an = [], []
        for triplet in sampled_triplets:
            anchor = batch[triplet[0], :]
            positive = batch[triplet[1], :]
            negative = batch[triplet[2], :]
            d_ap.append(((anchor - positive).pow(2).sum() + 1e-8).pow(1 / 2))
            d_an.append(((anchor - negative).pow(2).sum() + 1e-8).pow(1 / 2))
        d_ap, d_an = torch.stack(d_ap), torch.stack(d_an)

        # Group betas together by anchor class in sampled triplets (as each beta belongs to one class).
        # Device-agnostic: the original hard-coded torch.cuda.FloatTensor, which crashed on CPU.
        if self.beta_constant:
            beta = self.beta
        else:
            beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).to(batch.device)

        # Compute actual margin positive and margin negative loss
        pos_loss = torch.nn.functional.relu(d_ap - beta + self.margin)
        neg_loss = torch.nn.functional.relu(beta - d_an + self.margin)

        # Normalization constant: number of margin-violating pair terms.
        pair_count = torch.sum((pos_loss > 0.).float() + (neg_loss > 0.).float())

        # Actual Margin Loss (unnormalized sum is zero anyway when no pair violates).
        loss = torch.sum(pos_loss + neg_loss) if pair_count == 0. else torch.sum(pos_loss + neg_loss) / pair_count

        # (Optional) beta regularization penalty. BUGFIX: the original added the raw
        # beta vector to the scalar loss and ignored self.nu entirely; the intended
        # penalty (cf. the reference implementation) is nu * sum(beta).
        if self.nu:
            beta_reg = torch.sum(beta) if torch.is_tensor(beta) else beta
            loss = loss + self.nu * beta_reg

        return loss


"""================================================================================================="""


# ProxyNCALoss containing trainable class proxies. Works independent of batch size.
class ProxyNCALoss(torch.nn.Module):
    def __init__(self, num_proxies, embedding_dim):
        """
        Basic ProxyNCA Loss as proposed in 'No Fuss Distance Metric Learning using Proxies'.

        Args:
            num_proxies:     int, number of trainable proxies (usually one per class).
            embedding_dim:   int, dimensionality of the data embeddings the proxies must match.
        Returns:
            Nothing!
        """
        super(ProxyNCALoss, self).__init__()
        self.num_proxies = num_proxies
        self.embedding_dim = embedding_dim
        self.PROXIES = torch.nn.Parameter(torch.randn(num_proxies, self.embedding_dim) / 8)
        self.all_classes = torch.arange(num_proxies)

    def forward(self, batch, labels):
        """
        Args:
            batch:   torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels:  np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
        Returns:
            proxynca loss (torch.Tensor(), batch-averaged)
        """
        # Normalize embeddings and proxies; the factor 3 is empirically helpful for training.
        batch = 3 * torch.nn.functional.normalize(batch, dim=1)
        PROXIES = 3 * torch.nn.functional.normalize(self.PROXIES, dim=1)

        # Per sample: its own class proxy, and the index list of every other proxy.
        pos_proxies = torch.stack([PROXIES[lab:lab + 1, :] for lab in labels])
        neg_idx = torch.stack(
            [torch.cat([self.all_classes[:lab], self.all_classes[lab + 1:]]) for lab in labels])
        neg_proxies = torch.stack([PROXIES[idx, :] for idx in neg_idx])

        # Squared Euclidean distances sample -> proxies.
        dist_to_neg = (batch[:, None, :] - neg_proxies).pow(2).sum(dim=-1)
        dist_to_pos = (batch[:, None, :] - pos_proxies).pow(2).sum(dim=-1)

        # NCA objective: pull towards own proxy, push away from all others.
        return torch.mean(dist_to_pos[:, 0] + torch.logsumexp(-dist_to_neg, dim=1))


"""================================================================================================"""


def binarize(T, nb_classes):
    """
    One-hot encode a label tensor into an [N x nb_classes] float CUDA tensor.

    NOTE(review): relies on sklearn's label_binarize, which for nb_classes == 2
    returns a single column rather than two — confirm callers only use it with
    more than two classes.
    """
    import sklearn.preprocessing
    labels = T.cpu().numpy()
    onehot = sklearn.preprocessing.label_binarize(labels, classes=range(0, nb_classes))
    return torch.FloatTensor(onehot).cuda()


def l2_norm(input):
    """
    Row-wise L2-normalize a 2D tensor; a tiny eps keeps the division safe for zero rows.
    """
    norms = torch.sqrt(torch.sum(input * input, 1) + 1e-12)
    return input / norms.view(-1, 1).expand_as(input)


# Proxy Anchor Loss
class ProxyAnchorLoss(torch.nn.Module):
    """
    Proxy Anchor loss with one trainable proxy per class; positive and negative
    terms are log-sum-exp pooled over the batch per proxy.
    """
    def __init__(self, nb_classes, sz_embed, mrg=0.1, alpha=32):
        torch.nn.Module.__init__(self)
        # Trainable proxies, created directly on the GPU.
        self.proxies = torch.nn.Parameter(torch.randn(nb_classes, sz_embed).cuda())
        torch.nn.init.kaiming_normal_(self.proxies, mode='fan_out')

        self.nb_classes = nb_classes
        self.sz_embed = sz_embed
        self.mrg = mrg
        self.alpha = alpha

    def forward(self, X, T):
        proxies = self.proxies

        # Cosine similarity between every sample and every proxy, shape [N, K].
        cos = F.linear(l2_norm(X), l2_norm(proxies))
        # One-hot positive mask [N, K]; its complement marks the negatives.
        pos_mask = binarize(T=T, nb_classes=self.nb_classes)
        neg_mask = 1 - pos_mask

        pos_exp = torch.exp(-self.alpha * (cos - self.mrg))
        neg_exp = torch.exp(self.alpha * (cos + self.mrg))

        # Proxies whose class actually occurs in this batch: only they receive a
        # positive term; the negative term is averaged over all classes.
        with_pos_proxies = torch.nonzero(pos_mask.sum(dim=0) != 0).squeeze(dim=1)
        num_valid_proxies = len(with_pos_proxies)

        # Per-proxy sums over the batch, masked to positives resp. negatives.
        pos_sims = torch.where(pos_mask == 1, pos_exp, torch.zeros_like(pos_exp)).sum(dim=0)
        neg_sims = torch.where(neg_mask == 1, neg_exp, torch.zeros_like(neg_exp)).sum(dim=0)

        pos_term = torch.log(1 + pos_sims).sum() / num_valid_proxies
        neg_term = torch.log(1 + neg_sims).sum() / self.nb_classes
        return pos_term + neg_term


"""================================================================================================="""


class CEClassLoss(torch.nn.Module):
    def __init__(self, inp_dim, n_classes):
        """
        Basic Cross Entropy Loss for reference. Can be useful.
        Contains its own mapping network, so the actual network can remain untouched.

        Args:
            inp_dim:   int, embedding dimension of network.
            n_classes: int, number of target classes.
        Returns:
            Nothing!
        """
        super(CEClassLoss, self).__init__()
        # Own linear classification head on top of the embeddings.
        self.mapper = torch.nn.Sequential(torch.nn.Linear(inp_dim, n_classes))
        self.ce_loss = torch.nn.CrossEntropyLoss()

    def forward(self, batch, labels):
        """
        Args:
            batch:   torch.Tensor() [(BS x embed_dim)], batch of embeddings
            labels:  torch.Tensor or np.ndarray [(BS,)], class index per sample.
        Returns:
            cross-entropy loss (torch.Tensor(), batch-averaged by default)
        """
        # BUGFIX: the original cast targets via torch.cuda.LongTensor, which crashed
        # on CPU-only setups. as_tensor keeps/moves the labels onto batch's device.
        targets = torch.as_tensor(labels, device=batch.device).long()
        return self.ce_loss(self.mapper(batch), targets)


"""================================================================================================="""


class MultiSimilarityLoss(torch.nn.Module):
    """
        Based on the CVPR-19 paper: Multi-Similarity Loss with General Pair Weighting for Deep Metric Learning
    """
    def __init__(self, margin, thresh):
        super(MultiSimilarityLoss, self).__init__()
        self.thresh = thresh
        self.margin = margin

        # Fixed weighting temperatures from the paper.
        self.scale_pos = 2.0
        self.scale_neg = 40.0

    def forward(self, batch, labels):
        """
        Args:
            batch:  torch.Tensor [B, N], batch of (assumed normalized) embeddings.
            labels: torch.Tensor [B] (or [B, 1]), class index per sample.
        Returns:
            multi-similarity loss (torch.Tensor, averaged over the batch size).
        """
        assert batch.size(0) == labels.size(0), \
            f"batch.size(0): {batch.size(0)} is not equal to labels.size(0): {labels.size(0)}"

        eps = 1e-5
        n_samples = batch.size(0)
        similarity = batch.mm(batch.t())

        per_anchor = []
        for idx in range(n_samples):
            # Similarities of this anchor to all same-class / different-class samples.
            pos_cand = similarity[idx][labels == labels[idx]]
            # Drop (near-)self-similarities at ~1.
            pos_cand = pos_cand[pos_cand < 1 - eps]
            neg_cand = similarity[idx][labels != labels[idx]]

            if len(neg_cand) < 1 or len(pos_cand) < 1:
                continue

            # Pair mining: keep negatives with " d_n + margin > min(pos_dist) "
            # and positives with " d_p - margin < max(neg_dist) ".
            kept_neg = neg_cand[neg_cand + self.margin > min(pos_cand)]
            kept_pos = pos_cand[pos_cand - self.margin < max(neg_cand)]

            # Skip anchors where mining removed every informative pair.
            if len(kept_neg) < 1 or len(kept_pos) < 1:
                continue

            # Soft weighting of the mined pairs (log-sum-exp pooling per anchor).
            pos_term = 1.0 / self.scale_pos * torch.log(
                1 + torch.sum(torch.exp(-self.scale_pos * (kept_pos - self.thresh))))
            neg_term = 1.0 / self.scale_neg * torch.log(
                1 + torch.sum(torch.exp(self.scale_neg * (kept_neg - self.thresh))))
            per_anchor.append(pos_term + neg_term)

        if len(per_anchor) == 0:
            return torch.zeros([], requires_grad=True)

        return sum(per_anchor) / n_samples


"""================================================================================================="""


class SoftTripleLoss(torch.nn.Module):
    """
    Based on the ICCV-19 paper: SoftTriple Loss: Deep Metric Learning Without Triplet Sampling
    """
    def __init__(self, la, gamma, tau, margin, dim, cN, K):
        super(SoftTripleLoss, self).__init__()
        self.la = la
        self.gamma = 1. / gamma
        self.tau = tau
        self.margin = margin
        self.cN = cN
        self.K = K
        self.fc = Parameter(torch.Tensor(dim, cN * K))
        self.weight = torch.zeros(cN * K, cN * K, dtype=torch.bool).cuda()
        for i in range(0, cN):
            for j in range(0, K):
                # For the j center of i class
                # Set the [i*K + j + 1 : (i + 1)*K] = 1
                # That means from j+1 to K
                # It is actually a triangle
                self.weight[i * K + j, i * K + j + 1:(i + 1) * K] = 1

        torch.nn.kaiming_uniform_(self.fc, a=math.sqrt(5))
        return

    def forward(self, batch, target):
        centers = F.normalize(self.fc, p=2, dim=0)  # Normalizing K centers for each class
        simInd = batch.matmul(centers)  # Calculating the similarity between samples and centers
        simStruc = simInd.reshape(-1, self.cN, self.K)
        prob = F.softmax(simStruc * self.gamma,
                         dim=2)  # Get the probabilty vector for K center, prob has the shape [B, cN, K]
        simClass = torch.sum(prob * simStruc, dim=2)  # simClass is the Relaxed Similarity, it has the shape [B, cN]
        marginM = torch.zeros(simClass.shape).cuda()
        marginM[torch.arange(0, marginM.shape[0]),
                target] = self.margin  # Give a margin value to the target dimension for each sample
        lossClassify = F.cross_entropy(self.la * (simClass - marginM), target)
        if self.tau > 0 and self.K > 1:
            simCenter = centers.t().matmul(centers)  # The simCenter has the shape [cN*K, cN*K]
            reg = torch.sum(torch.sqrt(2.0 + 1e-5 - 2. * simCenter[self.weight])) / (self.cN * self.K * (self.K - 1.))
            return lossClassify + self.tau * reg
        else:
            return lossClassify
