import torch
import torch.nn.functional as F
import torch.nn as nn
from pytorch_metric_learning.distances import CosineSimilarity
from torch.cuda.amp import autocast
from typing import Optional, Tuple

class TripletLoss(nn.Module):
    """Triplet margin loss with online triplet mining over a batch.

    Pairwise distances are computed with one of three metrics:
      - 'L2':    Euclidean distance (via ``torch.cdist``).
      - 'Cos':   cosine distance, i.e. 1 - cosine similarity.
      - 'style': squared Frobenius distance between flattened Gram
                 matrices (module-level ``style_cost``).
    Triplets are then selected according to the ``mining`` strategy.
    """

    def __init__(self, margin=0.2, mining='semi-hard', distance='L2'):
        """
        Args:
            margin (float): margin of the triplet loss.
            mining (str): triplet mining strategy; one of 'hard',
                'semi-hard', 'batch-all'.
            distance (str): pairwise distance metric; one of 'L2', 'Cos',
                'style'.
        """
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.mining = mining
        assert mining in ['hard', 'semi-hard', 'batch-all'], "mining 必须是 'hard', 'semi-hard', 或 'batch-all'"
        self.distance = distance

    def forward(self, embeddings, labels):
        """
        Args:
            embeddings (torch.Tensor): embedding vectors of shape
                (batch_size, embedding_dim); for 'style' these are assumed
                to be flattened Gram matrices (see ``style_cost``).
            labels (torch.Tensor): class labels of shape (batch_size,).
        Returns:
            torch.Tensor: scalar triplet loss.
        """
        if self.distance == 'L2':
            pairwise_dist = torch.cdist(embeddings, embeddings, p=2)
        elif self.distance == 'Cos':
            # Fix: convert similarity to a distance (1 - cos) so that larger
            # values mean "farther apart". The previous code fed raw cosine
            # *similarity* (pytorch_metric_learning's CosineSimilarity, an
            # inverted metric) into the mining logic, which inverts the
            # triplet objective.
            pairwise_dist = self._cos_sim(embeddings, embeddings)
        elif self.distance == 'style':
            pairwise_dist = style_cost(embeddings)
        else:
            # Fix: the old message omitted the supported 'style' option.
            raise ValueError("Distance function should be 'L2', 'Cos', or 'style'")

        # Mining-dependent reduction of the distance matrix to a scalar loss.
        triplet_loss = self._get_triplet_loss(pairwise_dist, labels)
        return triplet_loss

    def _get_triplet_loss(self, pairwise_dist, labels):
        """Dispatch to the loss computation matching ``self.mining``.

        Args:
            pairwise_dist (torch.Tensor): distance matrix of shape
                (batch_size, batch_size).
            labels (torch.Tensor): class labels of shape (batch_size,).
        Returns:
            torch.Tensor: scalar triplet loss.
        """
        mask_anchor_positive = self._get_anchor_positive_mask(labels)
        mask_anchor_negative = self._get_anchor_negative_mask(labels)

        if self.mining == 'batch-all':
            # Average over all valid triplets.
            loss = self._batch_all_triplet_loss(pairwise_dist, mask_anchor_positive, mask_anchor_negative)
        elif self.mining == 'hard':
            # Hardest positive / hardest negative per anchor.
            loss = self._hard_triplet_loss(pairwise_dist, mask_anchor_positive, mask_anchor_negative)
        elif self.mining == 'semi-hard':
            # Negatives farther than the positive but within the margin.
            loss = self._semi_hard_triplet_loss(pairwise_dist, mask_anchor_positive, mask_anchor_negative)
        else:
            raise ValueError(f"不支持的 mining 方式:{self.mining}")

        return loss

    def _get_anchor_positive_mask(self, labels):
        """Boolean mask of valid (anchor, positive) pairs.

        A pair (i, j) is valid when i != j and labels[i] == labels[j].

        Args:
            labels (torch.Tensor): class labels of shape (batch_size,).
        Returns:
            torch.Tensor: bool mask of shape (batch_size, batch_size).
        """
        indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
        indices_not_equal = ~indices_equal  # exclude the diagonal (i == j)
        labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
        mask = indices_not_equal & labels_equal
        return mask

    def _get_anchor_negative_mask(self, labels):
        """Boolean mask of valid (anchor, negative) pairs.

        A pair (i, j) is valid when labels[i] != labels[j] (the diagonal is
        excluded automatically since labels[i] == labels[i]).

        Args:
            labels (torch.Tensor): class labels of shape (batch_size,).
        Returns:
            torch.Tensor: bool mask of shape (batch_size, batch_size).
        """
        labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
        mask = ~labels_equal
        return mask

    def _batch_all_triplet_loss(self, pairwise_dist, mask_anchor_positive, mask_anchor_negative):
        """Mean hinge loss over every valid (anchor, positive, negative) triplet."""
        # Broadcast to the (anchor, positive, negative) triplet tensor:
        # loss[a, p, n] = d(a, p) - d(a, n) + margin
        ap_dist = pairwise_dist.unsqueeze(2)
        an_dist = pairwise_dist.unsqueeze(1)
        triplet_loss = ap_dist - an_dist + self.margin

        # Zero out triplets where (a, p) or (a, n) is not a valid pair.
        mask_valid = mask_anchor_positive.unsqueeze(2) & mask_anchor_negative.unsqueeze(1)
        mask_valid = mask_valid.float()
        triplet_loss = triplet_loss * mask_valid
        triplet_loss = torch.clamp(triplet_loss, min=0.0)

        # Average over valid triplets; 0 if the batch yields none.
        num_valid_triplets = torch.sum(mask_valid)
        if num_valid_triplets > 0:
            triplet_loss = torch.sum(triplet_loss) / num_valid_triplets
        else:
            triplet_loss = torch.tensor(0.0, device=pairwise_dist.device)

        return triplet_loss

    def _hard_triplet_loss(self, pairwise_dist, mask_anchor_positive, mask_anchor_negative):
        """Hinge loss on the hardest positive and hardest negative per anchor."""
        # Hardest positive: max masked distance (invalid pairs contribute 0,
        # which is safe because distances are non-negative).
        hardest_positive_dist = torch.max(pairwise_dist * mask_anchor_positive.float(), dim=1, keepdim=True)[0]
        # Hardest negative: min distance after pushing invalid pairs to +1e6.
        hardest_negative_dist = torch.min(pairwise_dist + 1e6 * (~mask_anchor_negative).float(), dim=1, keepdim=True)[0]

        # Hinge on the hardest pair per anchor, averaged over anchors.
        triplet_loss = torch.clamp(hardest_positive_dist - hardest_negative_dist + self.margin, min=0.0)
        triplet_loss = torch.mean(triplet_loss)

        return triplet_loss

    def _semi_hard_triplet_loss(self, pairwise_dist, mask_anchor_positive, mask_anchor_negative):
        """Mean hinge loss over semi-hard triplets only.

        A triplet is semi-hard when 0 < d(a,p) - d(a,n) + margin < margin,
        i.e. the negative is farther than the positive but still inside the
        margin band.
        """
        # loss[a, p, n] = d(a, p) - d(a, n) + margin
        ap_dist = pairwise_dist.unsqueeze(2)
        an_dist = pairwise_dist.unsqueeze(1)
        triplet_loss = ap_dist - an_dist + self.margin

        # Zero out invalid triplets.
        mask_valid = mask_anchor_positive.unsqueeze(2) & mask_anchor_negative.unsqueeze(1)
        mask_valid = mask_valid.float()
        triplet_loss = triplet_loss * mask_valid

        # Keep only the semi-hard triplets.
        semi_hard_mask = (triplet_loss > 0) & (triplet_loss < self.margin)
        semi_hard_mask = semi_hard_mask.float()
        num_semi_hard = torch.sum(semi_hard_mask)

        if num_semi_hard > 0:
            triplet_loss = torch.sum(triplet_loss * semi_hard_mask) / num_semi_hard
        else:
            # No semi-hard triplets: the masked sum is 0 but keeps the graph.
            triplet_loss = torch.sum(triplet_loss * semi_hard_mask)

        return triplet_loss

    def _cos_sim(self, x, y):
        """Pairwise cosine *distance* matrix, 1 - cos(x_i, y_j).

        Args:
            x, y (torch.Tensor): shape (batch_size, c).
        Returns:
            torch.Tensor: shape (batch_size, batch_size), in [0, 2].
        """
        x_norm = F.normalize(x, p=2, dim=1)
        y_norm = F.normalize(y, p=2, dim=1)
        return 1 - torch.matmul(x_norm, y_norm.t())

def style_cost(x: torch.Tensor) -> torch.Tensor:
    """Pairwise squared-Frobenius distances between flattened Gram matrices.

    Args:
        x (torch.Tensor): flattened Gram matrices of shape [B, C*C]
            (the Gram computation itself happens upstream in the model).

    Returns:
        torch.Tensor: [B, B] matrix whose (i, j) entry is
        ||g_i - g_j||_F^2, clamped at zero to guard against small negative
        values caused by floating-point error.
    """
    assert x.dim() == 2, 'unsupported shape for style_cost'

    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 <a, b>, fully vectorized.
    sq_norms = (x * x).sum(dim=1)          # [B] squared norms
    inner = x @ x.t()                      # [B, B] pairwise inner products
    dist = sq_norms[:, None] + sq_norms[None, :] - 2 * inner

    return dist.clamp(min=0)               # [B, B]

# Gram matrix via batched tensor ops
def compute_gram(x: torch.Tensor) -> torch.Tensor:
    """Compute the normalized Gram matrix for each feature map in a batch.

    Args:
        x (torch.Tensor): feature maps of shape [B, C, H, W].

    Returns:
        torch.Tensor: Gram matrices of shape [B, C, C], each divided by H*W.
    """
    batch, channels, height, width = x.shape
    features = x.view(batch, channels, height * width)   # [B, C, H*W]
    return (features @ features.transpose(1, 2)) / (height * width)  # [B, C, C]

def styleKnn(
    query: torch.Tensor,
    reference: Optional[torch.Tensor] = None,
    K: int = 2,
    chunk_size: int = 64
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Chunked K-nearest-neighbour search under the style (squared-L2) distance.

    The reference set is processed in chunks of ``chunk_size`` rows so the
    full (B, N) distance matrix never has to be materialized; a running
    top-K is merged after each chunk. The result is independent of the
    chunk size.

    Args:
        query (Tensor): validation-set Gram matrices, shape (B, C*C).
        reference (Tensor): training-set Gram matrices, shape (N, C*C).
            Required; raises ValueError if omitted.
        K (int): number of nearest neighbours.
        chunk_size (int): rows of ``reference`` processed per iteration.

    Returns:
        Tuple[Tensor, Tensor]: (smallest distances (B, K), neighbour
        indices into ``reference`` (B, K)).

    Raises:
        ValueError: if ``reference`` is None.
    """
    # Fix: the old code crashed with an opaque AttributeError on the default
    # reference=None; fail fast with a clear message instead.
    if reference is None:
        raise ValueError("styleKnn requires a reference tensor")

    device = query.device
    B, _ = query.shape
    N = reference.shape[0]

    # Precompute squared norms for the ||a||^2 + ||b||^2 - 2<a,b> expansion.
    query_sq = torch.sum(query**2, dim=1)  # (B,)
    ref_sq = torch.sum(reference**2, dim=1)  # (N,)

    # Running top-K state, initialized to "no neighbour found yet".
    top_distances = torch.full((B, K), float('inf'), device=device)
    top_indices = torch.full((B, K), -1, dtype=torch.long, device=device)

    # Fix: honor the chunk_size parameter. Previously it was silently
    # overwritten with B, making the documented memory knob a no-op.
    for start in range(0, N, chunk_size):
        end = min(start + chunk_size, N)

        ref_chunk = reference[start:end]  # (chunk, D)
        ref_sq_chunk = ref_sq[start:end]  # (chunk,)

        # Squared L2 distances for this chunk; clamp guards the small
        # negative values floating-point cancellation can produce.
        dot_products = torch.mm(query, ref_chunk.t())  # (B, chunk)
        distances = (
            query_sq.unsqueeze(1) + ref_sq_chunk.unsqueeze(0) - 2 * dot_products
        ).clamp(min=0)  # (B, chunk)

        # Global indices of the chunk rows within ``reference``.
        global_indices = torch.arange(start, end, device=device)

        # Merge the current top-K with this chunk's candidates, then re-select.
        combined_dist = torch.cat([top_distances, distances], dim=1)  # (B, K+chunk)
        combined_indices = torch.cat([top_indices, global_indices.expand(B, -1)], dim=1)

        topk_values, topk_positions = combined_dist.topk(k=K, dim=1, largest=False)

        # Map the top-K positions back to global reference indices.
        top_distances = topk_values
        top_indices = torch.gather(combined_indices, 1, topk_positions)

    return top_distances, top_indices
