import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class SubCenterArcFace(nn.Module):
    """Sub-center ArcFace head producing margin-adjusted, scaled logits.

    Each class owns ``k`` sub-centers; a sample's similarity to a class is the
    maximum cosine similarity over that class's sub-centers.  An additive
    angular margin is applied to the target-class logit before scaling
    (Sub-center ArcFace, Deng et al.).

    Args:
        embedding_size (int): Dimensionality of the input embeddings.
        num_classes (int): Number of target classes.
        k (int): Number of sub-centers per class.
        margin (float): Additive angular margin in radians.
        scale (float): Multiplicative scale applied to the logits.
        easy_margin (bool): If True, apply the margin only where cos(theta) > 0.
    """

    def __init__(self, embedding_size, num_classes, k, margin=0.1, scale=16.0,
                 easy_margin=False):
        super(SubCenterArcFace, self).__init__()
        self.name = 'SubCenterArcFace'
        self.embedding_size = embedding_size
        self.num_classes = num_classes
        self.k = k
        self.easy_margin = easy_margin
        self.scale = scale
        # Derive all margin-dependent constants in one place.
        self.update(margin)

        # Sub-center weight tensor: one (embedding_size x num_classes) slice
        # per sub-center, i.e. W[:, :, c] holds the k center vectors of class c.
        self.W = nn.Parameter(torch.FloatTensor(k, embedding_size, num_classes))
        nn.init.xavier_uniform_(self.W)

    def update(self, margin=0.5):
        """Re-derive the cached trigonometric constants for a new ``margin``."""
        self.margin = margin
        self.cos_margin = math.cos(margin)
        self.sin_margin = math.sin(margin)
        # Below this cosine, theta + margin would exceed pi, so cos(theta + m)
        # is no longer monotone; fall back to a linear penalty instead.
        self.threshold = math.cos(math.pi - margin)
        self.mm = math.sin(math.pi - margin) * margin
        self.mmm = 1.0 + math.cos(math.pi - margin)

    def forward(self, x, labels=None):
        """Compute scaled (and, with labels, margin-adjusted) logits.

        Args:
            x (torch.Tensor): Input embeddings, shape (batch_size, embedding_size).
            labels (torch.Tensor or None): Class labels, shape (batch_size,).
                When None, plain scaled cosine logits are returned (inference).

        Returns:
            torch.Tensor: Logits of shape (batch_size, num_classes).
        """
        # L2-normalize embeddings and each sub-center vector along the
        # embedding dimension (dim=1 of W) so the matmul yields true cosines.
        # (Normalizing W along dim=2 would normalize across classes instead.)
        x_norm = F.normalize(x, p=2, dim=1)                 # (B, E)
        W_norm = F.normalize(self.W, p=2, dim=1)            # (k, E, C)

        cosine = torch.matmul(x_norm, W_norm)               # (k, B, C) via broadcasting
        # Per class, keep only the best-matching sub-center.
        cosine = cosine.max(dim=0).values                   # (B, C)
        cosine = torch.clamp(cosine, -1.0 + 1e-7, 1.0 - 1e-7)

        # cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m)
        sine = torch.sqrt(1.0 - torch.pow(cosine, 2)).clamp(0, 1)
        phi = cosine * self.cos_margin - sine * self.sin_margin
        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            # Outside the valid angular range apply the linear fallback.
            phi = torch.where(cosine > self.threshold, phi, cosine - self.mmm)

        if labels is not None:
            one_hot = F.one_hot(labels, self.num_classes)   # (B, C)
            # Margin on the target class only; plain cosine elsewhere.
            logits = self.scale * (one_hot * phi + (1 - one_hot) * cosine)
        else:
            logits = self.scale * cosine

        return logits

class SubCenterArcFace_diversity(nn.Module):
    """Sub-center ArcFace head with a sub-center diversity penalty.

    Identical to the plain Sub-center ArcFace head, but additionally returns a
    regularization term that penalizes sub-centers of the same class for being
    too similar to each other.

    Args:
        embedding_size (int): Dimensionality of the input embeddings.
        num_classes (int): Number of target classes.
        k (int): Number of sub-centers per class.
        margin (float): Additive angular margin in radians.
        scale (float): Multiplicative scale applied to the logits.
        easy_margin (bool): If True, apply the margin only where cos(theta) > 0.
    """

    def __init__(self, embedding_size, num_classes, k, margin=0.5, scale=64.0,
                 easy_margin=False):
        super(SubCenterArcFace_diversity, self).__init__()
        # NOTE(review): name matches the base class ('SubCenterArcFace'), which
        # looks like a copy-paste slip — kept as-is in case callers key on it.
        self.name = 'SubCenterArcFace'
        self.embedding_size = embedding_size
        self.num_classes = num_classes
        self.k = k
        self.easy_margin = easy_margin
        self.scale = scale
        # Derive all margin-dependent constants in one place.
        self.update(margin)
        # tau: diversity threshold in [0.1, 0.9]; larger tau tolerates more
        # similarity between sub-centers before any penalty is applied.
        self.tau = 0.5
        # tau_factor: weight of the diversity penalty in the returned loss term.
        self.tau_factor = 0.1

        # Sub-center weight tensor: W[:, :, c] holds the k center vectors of
        # class c, each of length embedding_size.
        self.W = nn.Parameter(torch.FloatTensor(k, embedding_size, num_classes))
        nn.init.xavier_uniform_(self.W)

    def update(self, margin=0.5):
        """Re-derive the cached trigonometric constants for a new ``margin``."""
        self.margin = margin
        self.cos_margin = math.cos(margin)
        self.sin_margin = math.sin(margin)
        # Below this cosine, theta + margin would exceed pi; use the linear
        # fallback penalty instead of cos(theta + m).
        self.threshold = math.cos(math.pi - margin)
        self.mm = math.sin(math.pi - margin) * margin
        self.mmm = 1.0 + math.cos(math.pi - margin)

    def diversity_penalty(self):
        """Penalize high average pairwise similarity between sub-centers.

        For every class the mean cosine similarity over all sub-center pairs is
        computed; any excess over ``tau`` is penalized (ReLU), pushing the
        sub-centers of a class apart.

        Returns:
            torch.Tensor: Scalar penalty, scaled by ``tau_factor``.
        """
        # With a single sub-center there are no pairs to diversify; the
        # unguarded formula would divide by zero and yield NaN.
        if self.k < 2:
            return torch.zeros((), device=self.W.device)
        num_pairs = self.k * (self.k - 1) / 2
        loss = 0
        # Reference layout is (num_classes, k, feat_dim); here W is
        # (k, feat_dim, num_classes), so class c's centers are W[:, :, c].
        for c in range(self.num_classes):
            centers = F.normalize(self.W[:, :, c], dim=1)
            sim_matrix = centers @ centers.T          # cosine similarity matrix
            upper_tri = sim_matrix.triu(diagonal=1)   # count each pair once
            avg_sim = upper_tri.sum() / num_pairs
            # Penalize only when the average similarity EXCEEDS tau
            # (ReLU is zero below the threshold).
            loss += F.relu(avg_sim - self.tau)
        return loss * self.tau_factor

    def forward(self, x, labels=None):
        """Compute logits plus the sub-center diversity penalty.

        Args:
            x (torch.Tensor): Input embeddings, shape (batch_size, embedding_size).
            labels (torch.Tensor or None): Class labels, shape (batch_size,).
                When None, plain scaled cosine logits are returned.

        Returns:
            tuple[torch.Tensor, torch.Tensor]: Logits of shape
            (batch_size, num_classes) and the scalar diversity penalty.
        """
        # L2-normalize embeddings and each sub-center vector along the
        # embedding dimension (dim=1 of W) so the matmul yields true cosines,
        # matching the normalization used in diversity_penalty.
        x_norm = F.normalize(x, p=2, dim=1)                 # (B, E)
        W_norm = F.normalize(self.W, p=2, dim=1)            # (k, E, C)

        cosine = torch.matmul(x_norm, W_norm)               # (k, B, C) via broadcasting
        # Per class, keep only the best-matching sub-center.
        cosine = cosine.max(dim=0).values                   # (B, C)
        cosine = torch.clamp(cosine, -1.0 + 1e-7, 1.0 - 1e-7)

        # cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m)
        sine = torch.sqrt(1.0 - torch.pow(cosine, 2)).clamp(0, 1)
        phi = cosine * self.cos_margin - sine * self.sin_margin
        if self.easy_margin:
            phi = torch.where(cosine > 0, phi, cosine)
        else:
            # Outside the valid angular range apply the linear fallback.
            phi = torch.where(cosine > self.threshold, phi, cosine - self.mmm)

        if labels is not None:
            one_hot = F.one_hot(labels, self.num_classes)   # (B, C)
            # Margin on the target class only; plain cosine elsewhere.
            logits = self.scale * (one_hot * phi + (1 - one_hot) * cosine)
        else:
            logits = self.scale * cosine

        return logits, self.diversity_penalty()