import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd.function import Function
from torch.autograd import Variable


class OriTripletLoss(nn.Module):
    """Triplet loss with hard positive/negative mining.
    
    Reference:
    Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.
    Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.
    
    Args:
    - batch_size (int): unused; kept only for backward compatibility with callers.
    - margin (float): margin for triplet.
    """
    
    def __init__(self, batch_size, margin=0.3):
        super(OriTripletLoss, self).__init__()
        self.margin = margin
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, targets):
        """
        Args:
        - inputs: feature matrix with shape (batch_size, feat_dim)
        - targets: ground truth labels with shape (batch_size)
        Returns:
        - loss: scalar margin-ranking loss over hardest triplets
        - correct: number of anchors whose hardest negative is already at
          least as far as the hardest positive (accuracy proxy)
        """
        n = inputs.size(0)
        
        # Pairwise Euclidean distances via ||a||^2 + ||b||^2 - 2 a.b
        dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # FIX: use the keyword form of addmm_; the positional
        # (beta, alpha, mat1, mat2) signature was removed in modern PyTorch.
        # This also matches the style used elsewhere in this file.
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
        dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
        
        # For each anchor, find the hardest positive and hardest negative
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
            dist_an.append(dist[i][~mask[i]].min().unsqueeze(0))
        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)
        
        # Ranking hinge loss: want dist_an > dist_ap by at least `margin`
        y = torch.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        
        # compute accuracy
        correct = torch.ge(dist_an, dist_ap).sum().item()
        return loss, correct

# Adaptive weights
def softmax_weights(dist, mask):
    """Row-wise softmax over `dist`, restricted to entries where mask == 1.

    The per-row max (over masked entries) is subtracted before exp() for
    numerical stability; a small epsilon in the denominator avoids
    division by zero when a row has no active entries.
    """
    row_max = torch.max(dist * mask, dim=1, keepdim=True)[0]
    shifted = torch.exp(dist - row_max) * mask
    denom = shifted.sum(dim=1, keepdim=True) + 1e-6  # avoid division by zero
    return shifted / denom

def normalize(x, axis=-1):
    """L2-normalize `x` to unit length along `axis`.

    Args:
      x: pytorch tensor
    Returns:
      Tensor of the same shape; a tiny epsilon guards zero-norm rows.
    """
    norms = torch.norm(x, 2, axis, keepdim=True)
    return 1. * x / (norms.expand_as(x) + 1e-12)

class TripletLoss_WRT(nn.Module):
    """Weighted Regularized Triplet loss.

    Instead of hard mining, every positive/negative pair is softly weighted
    by a softmax over its distance (see `softmax_weights`), yielding soft
    furthest-positive / closest-negative distances per anchor.
    """

    def __init__(self):
        super(TripletLoss_WRT, self).__init__()
        self.ranking_loss = nn.SoftMarginLoss()

    def forward(self, inputs, targets, normalize_feature=False):
        """
        Args:
        - inputs: feature matrix with shape (N, feat_dim)
        - targets: ground truth labels with shape (N)
        - normalize_feature: L2-normalize features before computing distances
        Returns:
        - loss: scalar soft-margin loss
        - correct: number of anchors whose soft closest negative is at least
          as far as the soft furthest positive (accuracy proxy)
        """
        if normalize_feature:
            inputs = normalize(inputs, axis=-1)
        dist_mat = pdist_torch(inputs, inputs)

        N = dist_mat.size(0)
        # shape [N, N]: same-identity / different-identity indicators
        is_pos = targets.expand(N, N).eq(targets.expand(N, N).t()).float()
        is_neg = targets.expand(N, N).ne(targets.expand(N, N).t()).float()

        # `dist_ap` means distance(anchor, positive)
        dist_ap = dist_mat * is_pos
        dist_an = dist_mat * is_neg

        # Far positives / near negatives receive larger weights
        weights_ap = softmax_weights(dist_ap, is_pos)
        weights_an = softmax_weights(-dist_an, is_neg)
        furthest_positive = torch.sum(dist_ap * weights_ap, dim=1)
        closest_negative = torch.sum(dist_an * weights_an, dim=1)

        # FIX: build the all-ones target with ones_like instead of the
        # legacy `Tensor.new().resize_as_().fill_(1)` idiom (deprecated API);
        # matches the style already used in OriTripletLoss.
        y = torch.ones_like(furthest_positive)
        loss = self.ranking_loss(closest_negative - furthest_positive, y)

        # compute accuracy
        correct = torch.ge(closest_negative, furthest_positive).sum().item()
        return loss, correct
        
def pdist_torch(emb1, emb2):
    '''
    Pairwise Euclidean distance matrix between embeddings1 and embeddings2
    (GPU-friendly). Returns an (m, n) tensor of distances.
    '''
    m = emb1.shape[0]
    n = emb2.shape[0]
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    sq1 = emb1.pow(2).sum(dim=1, keepdim=True).expand(m, n)
    sq2 = emb2.pow(2).sum(dim=1, keepdim=True).expand(n, m).t()
    dist = (sq1 + sq2).addmm(emb1, emb2.t(), beta=1, alpha=-2)
    # clamp before sqrt for numerical stability
    return dist.clamp(min=1e-12).sqrt()


def pdist_np(emb1, emb2):
    '''
    Pairwise SQUARED Euclidean distance matrix between embeddings1 and
    embeddings2 on CPU. Note: the sqrt/clip step is intentionally left out
    (it is commented out in this codebase), so callers receive squared
    distances.
    '''
    sq1 = np.square(emb1).sum(axis=1)[:, np.newaxis]
    sq2 = np.square(emb2).sum(axis=1)[np.newaxis, :]
    return -2 * np.matmul(emb1, emb2.T) + sq1 + sq2

class InfoNCELoss(nn.Module):
    """Supervised InfoNCE contrastive loss over concatenated modalities.

    For each sample, the numerator sums the exponentiated similarities to
    all other samples with the same label; the denominator sums them over
    ALL other samples. Self-similarity is excluded from both.
    """

    def __init__(self, temperature=0.05):
        """
        Args:
            temperature (float): scaling factor applied to similarities.
        """
        super(InfoNCELoss, self).__init__()
        self.temperature = temperature

    def forward(self, features, labels):
        """
        Args:
            features (torch.Tensor): concatenated visible/infrared features, shape [B, dim].
            labels (torch.Tensor): corresponding identity labels, shape [B].
        Returns:
            torch.Tensor: scalar InfoNCE loss.
        """
        feats = torch.nn.functional.normalize(features, p=2, dim=1)
        # Cosine-similarity matrix, scaled by temperature
        sim = torch.matmul(feats, feats.T) / self.temperature
        exp_sim = torch.exp(sim)
        # Zero out the diagonal so a sample never contrasts with itself
        self_mask = torch.eye(exp_sim.size(0), device=exp_sim.device).bool()
        exp_sim = exp_sim.masked_fill(self_mask, 0)
        # Same-identity indicator (diagonal irrelevant: already zeroed above)
        same_id = torch.eq(labels.unsqueeze(1), labels.unsqueeze(0))
        # Numerator: positives only; denominator: every other sample
        numerator = torch.sum(exp_sim * same_id.float(), dim=1)
        denominator = torch.sum(exp_sim, dim=1)
        log_ratio = torch.log(torch.div(numerator, denominator))
        # Average the negative log-ratio over the batch
        return torch.sum(log_ratio) * (-1 / features.size(0))

class CrossModalNCELoss(nn.Module):
    """Multi-positive NCE loss over concatenated modality embeddings.

    Args:
        temperature (float): scaling for the similarity logits.
        epsilon (float): reserved guard against log(0); currently unused.
    """

    def __init__(self, temperature=0.07, epsilon=1e-8):
        super().__init__()
        self.temperature = temperature
        self.eps = epsilon  # guard against log(0)

    def forward(self, embeddings, labels):
        """
        Args:
            embeddings: stacked infrared+visible embeddings, [num_total, feat_dim].
            labels: identity labels for each row, [num_total].
        Returns:
            torch.Tensor: scalar loss.

        NOTE: `.view(num_total, -1)` requires every row to have the same
        number of positives (standard PK identity sampling) — TODO confirm
        against the sampler used by callers.
        """
        num_total = embeddings.size(0)

        # Cosine similarity via dot products of L2-normalized embeddings
        embeddings = F.normalize(embeddings, p=2, dim=1)
        sim_matrix = torch.mm(embeddings, embeddings.T)  # [num_total, num_total]

        # Positive mask: same identity, excluding self-comparison
        mask = torch.eq(labels.view(-1, 1), labels.view(1, -1)).bool()
        diag = torch.eye(num_total, dtype=torch.bool, device=embeddings.device)
        pos_mask = mask & ~diag
        # FIX: the original used `~mask` for negatives, which re-included the
        # diagonal (self-similarity = 1, the largest logit) as a negative,
        # biasing the loss. Exclude self from negatives as well.
        neg_mask = ~mask & ~diag

        # Split similarities into positives and negatives per anchor
        pos_sim = sim_matrix[pos_mask].view(num_total, -1)  # [num_total, num_pos]
        neg_sim = sim_matrix[neg_mask].view(num_total, -1)  # [num_total, num_neg]

        # Multi-positive logits: positives first, then negatives
        logits = torch.cat([pos_sim, neg_sim], dim=1) / self.temperature

        # Target distribution: 1 at each positive position, 0 elsewhere
        num_pos = pos_sim.size(1)
        targets = torch.zeros_like(logits)
        targets[:, :num_pos] = 1.0

        # Cross-entropy against the (unnormalized) multi-positive targets
        loss = - (targets * F.log_softmax(logits, dim=-1)).sum(dim=1)
        return loss.mean()

class SupervisedInfoNCE(torch.nn.Module):
    """Supervised InfoNCE-style contrastive loss on normalized features.

    Positivity/negativity is defined by matching class labels.

    NOTE(review): the `supervised` flag is stored but never read in
    `forward` — positives always come from the labels; confirm intent.
    """

    def __init__(self, temperature=0.07, supervised=False):
        """
        temperature: float, = 1 / kappa
        supervised: bool, stored for compatibility (currently unused, see class note)
        """
        super().__init__()

        self.temperature = temperature
        self.supervised = supervised

    def forward(self, features, target):
        feats = F.normalize(features, dim=-1)

        # Temperature-scaled cosine similarities
        logits = feats.matmul(feats.transpose(-2, -1)) / self.temperature

        # Same-label mask; positives exclude the diagonal, negatives are the rest
        same_label = (target.unsqueeze(1) == target.t().unsqueeze(0)).float()
        eye = torch.diag(torch.ones(same_label.shape[0], device=same_label.device))
        positives = same_label - eye
        negatives = 1 - same_label

        # Entries with mask 0 must not contribute to the logsumexp;
        # adding -1000 makes exp(-1000) effectively zero.
        pos_fill = negatives * (-1000)
        neg_fill = positives * (-1000)

        # Per-example log contrastive ratio ([batch])
        per_example = (logits * positives + pos_fill).logsumexp(-1) \
                    - (logits * negatives + neg_fill).logsumexp(-1)

        # Negative mean log ratio ([1])
        return -torch.mean(per_example)


class DomainLoss(nn.Module):
    """Cross-entropy loss for a modality discriminator.

    Assumes `domain_out` stacks the two modalities half-and-half:
    first half visible (label 0), second half infrared (label 1).
    """

    def __init__(self):
        super(DomainLoss, self).__init__()
        # Plain cross-entropy over the two modality classes
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, domain_out):
        half = domain_out.size(0) // 2
        visible = torch.zeros(half, dtype=torch.long)   # visible modality -> 0
        infrared = torch.ones(half, dtype=torch.long)   # infrared modality -> 1
        labels = torch.cat([visible, infrared]).to(domain_out.device)
        return self.loss_fn(domain_out, labels)

class CrossModalCenterLoss(nn.Module):
    """Center loss with learnable per-class centers.

    Reference:
    Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.

    Args:
        num_classes (int): number of classes.
        feat_dim (int): feature dimension.
        use_gpu (bool): place the centers on CUDA when True.
    """

    def __init__(self, num_classes, feat_dim=2048, use_gpu=True):
        super(CrossModalCenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.use_gpu = use_gpu

        centers = torch.randn(self.num_classes, self.feat_dim)
        if self.use_gpu:
            centers = centers.cuda()
        self.centers = nn.Parameter(centers)

    def forward(self, x, labels):
        """
        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size).
        Returns:
            Scalar: mean squared distance of each feature to its class center.
        """
        n = x.size(0)
        # Squared distances ||x - c||^2 = ||x||^2 + ||c||^2 - 2 x.c
        x_sq = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(n, self.num_classes)
        c_sq = torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, n).t()
        distmat = x_sq + c_sq
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)

        # One-hot style mask selecting each sample's own class column
        classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            classes = classes.cuda()
        expanded_labels = labels.unsqueeze(1).expand(n, self.num_classes)
        mask = expanded_labels.eq(classes.expand(n, self.num_classes))

        selected = distmat * mask.float()
        # Clamp keeps the sum finite and strictly positive before averaging
        return selected.clamp(min=1e-12, max=1e+12).sum() / n