import numpy as np
import torch
import torch.nn as nn

SMALL_NUM = np.log(1e-45)


class DCL(object):
    """One-way Decoupled Contrastive Loss (https://arxiv.org/pdf/2110.06848.pdf).

    The positive pair is removed ("decoupled") from the contrastive
    denominator by masking the relevant diagonals with a very large
    negative constant before the log-sum-exp.

    :param temperature: temperature controlling the sharpness of the
        similarity distribution
    :param weight_fn: optional weighting function for the positive-pair term
    """

    def __init__(self, temperature=0.1, weight_fn=None):
        super(DCL, self).__init__()
        self.temperature = temperature
        self.weight_fn = weight_fn

    def __call__(self, z1, z2):
        """Compute the one-way DCL loss between embedding batches z1 and z2."""
        batch = z1.size(0)
        cross_view = z1 @ z2.t()
        # Positive term: negated matching-pair similarities, optionally weighted.
        pos = -torch.diag(cross_view) / self.temperature
        if self.weight_fn is not None:
            pos = self.weight_fn(z1, z2) * pos
        # Negative term: within-view and cross-view similarities side by side.
        logits = torch.cat([z1 @ z1.t(), cross_view], dim=1) / self.temperature
        # Mask both diagonals (self-similarity and the positive pair) so they
        # contribute ~exp(log(1e-45)) ~ 0 to the log-sum-exp denominator.
        mask = torch.eye(batch, device=z1.device).repeat(1, 2)
        neg = torch.logsumexp(logits + mask * np.log(1e-45), dim=1)
        return (pos + neg).mean()


class DCLW(DCL):
    """DCL with negative-von-Mises-Fisher weighting of the positive term
    (https://arxiv.org/pdf/2110.06848.pdf).

    :param sigma: spread parameter of the weighting function
    :param temperature: temperature passed through to DCL
    """

    def __init__(self, sigma=0.5, temperature=0.1):
        def vmf_weight(z1, z2):
            # w(z1, z2) = 2 - N * softmax_over_batch(<z1_i, z2_i> / sigma)
            pair_sim = (z1 * z2).sum(dim=1) / sigma
            return 2 - z1.size(0) * torch.nn.functional.softmax(pair_sim, dim=0).squeeze()

        super(DCLW, self).__init__(weight_fn=vmf_weight, temperature=temperature)


import torch.nn as nn

class MoCoDCLWLoss(nn.Module):
    """DCLW loss adapted to the MoCo framework, where the negatives come
    from a memory queue instead of the current batch.

    Because the queue never contains the positive key, the log-sum-exp
    denominator is already "decoupled" from the positive pair and no
    diagonal masking is needed.

    :param sigma: spread of the von Mises-Fisher-like positive weighting
    :param temperature: temperature scaling of all similarities
    """

    def __init__(self, sigma=0.5, temperature=0.1):
        super().__init__()
        self.temperature = temperature
        self.sigma = sigma

    def _get_weight(self, q, k_pos):
        """Negative von Mises-Fisher-like weight for each positive pair.

        w_i = 2 - N * softmax_j(<q_j, k_pos_j> / sigma)_i, where the softmax
        over the batch approximates the normalizing expectation E[exp(.)].
        Inputs are assumed to be L2-normalized already — TODO confirm at the
        caller. Gradients are allowed to flow through the weight.
        """
        similarity = torch.sum(q * k_pos, dim=1)  # per-pair dot products, [N]
        softmax_sim = nn.functional.softmax(similarity / self.sigma, dim=0)
        return 2 - q.size(0) * softmax_sim  # q.size(0) is the batch size N

    def forward(self, q, k_pos, k_neg):
        """Compute the MoCo-DCLW loss.

        :param q: query features, shape [N, D]
        :param k_pos: positive key features, shape [N, D]
        :param k_neg: negative key features from the queue, shape [K, D]
        :return: scalar loss averaged over the batch
        """
        # Weighted positive term: -w_i * <q_i, k_pos_i> / t, shape [N].
        pos_similarity = torch.sum(q * k_pos, dim=1)
        positive_loss = -pos_similarity / self.temperature
        positive_loss = positive_loss * self._get_weight(q, k_pos)

        # Negative term: log-sum-exp over the queue similarities.
        # [N, D] x [D, K] -> [N, K], reduced over the K negatives to [N].
        neg_similarity = torch.mm(q, k_neg.t()) / self.temperature
        negative_loss = torch.logsumexp(neg_similarity, dim=1)

        return (positive_loss + negative_loss).mean()
    
class CombinedLoss(nn.Module):
    """Weighted sum of a CrossEntropy (InfoNCE) loss and the MoCo-adapted
    DCLW loss.

    :param ce_weight: weight of the cross-entropy term
    :param dclw_weight: weight of the DCLW term
    :param sigma: sigma parameter for the DCLW weighting function
    :param temperature_dclw: temperature for the DCLW loss computation; the
        CE logits are expected to be temperature-scaled by the model already
    """

    def __init__(self, ce_weight=1.0, dclw_weight=1.0, sigma=0.5, temperature_dclw=0.1):
        super().__init__()
        self.ce_weight = ce_weight
        self.dclw_weight = dclw_weight
        # MoCo-adapted DCLW criterion (queue-based negatives).
        self.dclw_criterion = MoCoDCLWLoss(sigma=sigma, temperature=temperature_dclw)
        self.ce_criterion = nn.CrossEntropyLoss()

    def forward(self, q, k_pos, k_neg, logits, zeros, **kwargs):
        """Compute the combined loss.

        :param q: query features [N, D]
        :param k_pos: positive key features [N, D]
        :param k_neg: negative key features from the queue [K, D]
        :param logits: pre-computed logits for the CE loss [N, 1+K]
        :param zeros: all-zero target labels for the CE loss [N]
        :param kwargs: absorbs unused extra inputs (e.g. q_enc)
        :return: (total loss tensor, dict of per-term values for logging)
        """
        dclw_term = self.dclw_criterion(q, k_pos, k_neg)
        ce_term = self.ce_criterion(logits.float(), zeros.long())
        total = self.ce_weight * ce_term + self.dclw_weight * dclw_term

        return total, {
            'loss_ce': ce_term.item(),
            'loss_dclw': dclw_term.item(),
            # kept as a tensor so the caller can backprop through this entry
            'total_loss': total,
        }