import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# class PatchLMMDLoss(nn.Module):
#     def __init__(self, num_classes, kernel_mul=2.0, kernel_num=5, fix_sigma=None, top_k_ratio=0.5):
#         super(PatchLMMDLoss, self).__init__()
#         self.num_classes = num_classes
#         self.kernel_mul = kernel_mul
#         self.kernel_num = kernel_num
#         self.fix_sigma = fix_sigma
#         self.top_k_ratio = top_k_ratio

#     def forward(self, src_feat, tgt_feat, src_labels, tgt_logits):
#         """
#         src_feat, tgt_feat: shape [B, L, D]
#         src_labels: [B]
#         tgt_logits: [B, C]
#         """
#         B, L, D = src_feat.shape
        
#         # Generate target pseudo labels and confidences
#         tgt_probs = F.softmax(tgt_logits, dim=1)
#         tgt_pseudo = tgt_probs.argmax(dim=1)  # [B]
        
#         # Generate patch confidence scores (using feature norms as example)
#         src_conf = torch.norm(src_feat, p=2, dim=2)  # [B, L]
#         tgt_conf = torch.norm(tgt_feat, p=2, dim=2)  # [B, L]
        
#         # Calculate number of patches to select
#         k = max(1, int(self.top_k_ratio * L))
        
#         # Select top-k patches for each sample
#         src_topk_idx = torch.topk(src_conf, k=k, dim=1)[1]  # [B, k]
#         tgt_topk_idx = torch.topk(tgt_conf, k=k, dim=1)[1]  # [B, k]
        
#         # Gather selected features
#         src_topk_feats = torch.gather(src_feat, 1, src_topk_idx.unsqueeze(-1).expand(-1, -1, D))  # [B, k, D]
#         tgt_topk_feats = torch.gather(tgt_feat, 1, tgt_topk_idx.unsqueeze(-1).expand(-1, -1, D))  # [B, k, D]
        
#         # Calculate class-wise MMD
#         total_loss = 0.0
#         valid_classes = 0
#         for cls in range(self.num_classes):
#             # Get class masks
#             src_mask = (src_labels == cls)
#             tgt_mask = (tgt_pseudo == cls)
            
#             # Skip if no samples in either domain
#             if src_mask.sum() == 0 or tgt_mask.sum() == 0:
#                 continue
                
#             # Get class features [N, k, D]
#             src_cls_feats = src_topk_feats[src_mask]
#             tgt_cls_feats = tgt_topk_feats[tgt_mask]
            
#             # # Flatten to [N*k, D]
#             # src_cls_feats = src_cls_feats.reshape(-1, D)
#             # tgt_cls_feats = tgt_cls_feats.reshape(-1, D)
#             src_cls_feats = torch.mean(src_cls_feats, dim=0)  # [N, D]
#             tgt_cls_feats = torch.mean(tgt_cls_feats, dim=0)
            
#             # Compute MMD
#             total_loss += self.compute_mmd(src_cls_feats, tgt_cls_feats)
#             valid_classes += 1
            
#         return total_loss / valid_classes if valid_classes > 0 else torch.tensor(0.0).to(src_feat.device)

#     def compute_mmd(self, x, y):
#         x_size = x.size(0)
#         y_size = y.size(0)
#         xx, yy, xy = torch.mm(x, x.t()), torch.mm(y, y.t()), torch.mm(x, y.t())
        
#         rx = xx.diag().unsqueeze(0).expand_as(xx)
#         ry = yy.diag().unsqueeze(0).expand_as(yy)
        
#         dxx = rx.t() + rx - 2 * xx
#         dyy = ry.t() + ry - 2 * yy
#         dxy = rx.t() + ry - 2 * xy
        
#         XX, YY, XY = torch.zeros_like(dxx), torch.zeros_like(dyy), torch.zeros_like(dxy)
        
#         if self.fix_sigma:
#             bandwidth = self.fix_sigma
#         else:
#             bandwidth = torch.sum(dxx.data) / (x_size ** 2 - x_size) + \
#                         torch.sum(dyy.data) / (y_size ** 2 - y_size) + \
#                         torch.sum(dxy.data) / (x_size * y_size)
#             bandwidth /= 3.0
            
#         bandwidth_list = [bandwidth * (self.kernel_mul ** i) for i in range(self.kernel_num)]
        
#         for bw in bandwidth_list:
#             XX += torch.exp(-dxx / bw)
#             YY += torch.exp(-dyy / bw)
#             XY += torch.exp(-dxy / bw)
            
#         return (XX.mean() + YY.mean() - 2 * XY.mean()) / len(bandwidth_list)
    

class PatchLMMDLoss(nn.Module):
    """Class-wise local MMD loss over the most salient feature patches.

    Per sample, the top-k most confident patches (confidence = feature L2
    norm) are kept; patches are then grouped by source label / target
    pseudo-label, and each class group is aligned across domains with a
    multi-kernel Gaussian MMD.  Classes that are rare in the batch receive
    larger weights.
    """

    def __init__(self, num_classes, kernel_mul=2.0, kernel_num=5, fix_sigma=None, top_k_ratio=0.75, eps=1e-6):
        super(PatchLMMDLoss, self).__init__()
        self.num_classes = num_classes
        self.kernel_mul = kernel_mul    # geometric factor between successive kernel bandwidths
        self.kernel_num = kernel_num    # number of Gaussian kernels in the mixture
        self.fix_sigma = fix_sigma      # optional fixed bandwidth; None -> estimate from data
        self.top_k_ratio = top_k_ratio  # fraction of patches kept per sample
        self.eps = eps                  # small constant guarding divisions by zero

    def forward(self, src_feat, tgt_feat, src_labels, tgt_logits):
        """
        Args:
            src_feat, tgt_feat: [B, L, D] patch features.
            src_labels: [B] ground-truth source labels.
            tgt_logits: [B, C] target classifier logits; pseudo labels are
                their argmax.
        Returns:
            Scalar class-weighted MMD loss (0 if no class is shared).
        """
        B, L, D = src_feat.shape
        device = src_feat.device

        # Target pseudo labels from the classifier's prediction.
        tgt_probs = F.softmax(tgt_logits, dim=1)
        tgt_pseudo = tgt_probs.argmax(dim=1)  # [B]

        cls_weights = self._cal_class_weights(src_labels, tgt_pseudo)

        # Per-patch confidence scores: [B, L]
        src_conf = self._calc_patch_confidence(src_feat)
        tgt_conf = self._calc_patch_confidence(tgt_feat)

        k = max(1, int(self.top_k_ratio * L))

        src_topk_feats = self._select_topk_patches(src_feat, src_conf, k)
        tgt_topk_feats = self._select_topk_patches(tgt_feat, tgt_conf, k)

        total_loss = 0.0
        valid_classes = 0
        for cls in range(self.num_classes):
            # Skip classes whose weight is negligible.
            if cls_weights[cls] < self.eps:
                continue

            src_mask = (src_labels == cls)
            tgt_mask = (tgt_pseudo == cls)
            # Skip classes absent from either domain in this batch.
            if src_mask.sum() == 0 or tgt_mask.sum() == 0:
                continue

            # [N, k, D] -> [N*k, D]: every selected patch is one MMD sample.
            src_cls_feats = src_topk_feats[src_mask].reshape(-1, D)
            tgt_cls_feats = tgt_topk_feats[tgt_mask].reshape(-1, D)

            loss = self.compute_mmd(src_cls_feats, tgt_cls_feats)
            total_loss += cls_weights[cls] * loss
            valid_classes += 1

        # BUGFIX: use the inputs' device rather than hard-coded .cuda() so
        # the loss also runs on CPU / non-default devices.
        return total_loss / valid_classes if valid_classes > 0 else torch.tensor(0.0, device=device)

    def _cal_class_weights(self, src_labels, tgt_pseudo):
        """Inverse-frequency class weights (rarer classes weigh more)."""
        device = src_labels.device
        src_np = src_labels.cpu().numpy()
        tgt_np = tgt_pseudo.cpu().numpy()

        # Per-class sample counts in each domain: [C]
        src_cls_counts = np.bincount(src_np, minlength=self.num_classes)
        tgt_cls_counts = np.bincount(tgt_np, minlength=self.num_classes)

        # Count shared by both domains; 0 when a class is missing on one
        # side (such classes get the largest raw weight but are skipped in
        # forward() anyway since there is nothing to align).
        joint_counts = np.minimum(src_cls_counts, tgt_cls_counts)

        # weight ~ 1 / count, normalized to sum to 1.
        weights = (joint_counts.sum() + self.eps) / (joint_counts + self.eps)
        weights = weights / weights.sum()

        # BUGFIX: follow the labels' device instead of hard-coded .cuda().
        return torch.from_numpy(weights.astype('float32')).to(device)

    def _calc_patch_confidence(self, feat):
        """Patch confidence score: L2 norm of each patch feature -> [B, L].

        An entropy-based score over softmaxed features would be an
        alternative; the norm is kept for simplicity and speed.
        """
        return torch.norm(feat, p=2, dim=2)

    def _select_topk_patches(self, feat, conf, k):
        """Keep the k most confident patches per sample -> [B, k, D]."""
        topk_idx = torch.topk(conf, k=k, dim=1)[1]  # [B, k]
        return torch.gather(feat, 1, topk_idx.unsqueeze(-1).expand(-1, -1, feat.size(2)))

    def compute_mmd(self, x, y):
        """Multi-kernel Gaussian MMD between sample sets x [n, D] and y [m, D]."""
        x_size = x.size(0)
        y_size = y.size(0)

        # Pairwise squared Euclidean distances via Gram matrices.
        xx, yy, xy = torch.mm(x, x.t()), torch.mm(y, y.t()), torch.mm(x, y.t())

        rx = xx.diag().unsqueeze(0).expand_as(xx)
        ry = yy.diag().unsqueeze(0).expand_as(yy)

        dxx = rx.t() + rx - 2 * xx
        dyy = ry.t() + ry - 2 * yy
        dxy = rx.t() + ry - 2 * xy

        if self.fix_sigma:
            bandwidth = self.fix_sigma
        else:
            # Median-heuristic-style estimate from mean pairwise distances;
            # .data detaches it so the bandwidth carries no gradient.
            # max(..., 1) guards the single-sample case.
            bandwidth = (torch.sum(dxx.data) / max((x_size ** 2 - x_size), 1) +
                        torch.sum(dyy.data) / max((y_size ** 2 - y_size), 1) +
                        torch.sum(dxy.data) / max((x_size * y_size), 1)) / 3.0
            # BUGFIX: clamp to eps so identical/degenerate features cannot
            # produce a zero bandwidth (which would yield NaN in exp(-d/0)).
            bandwidth = bandwidth.clamp_min(self.eps)

        bandwidth_list = [bandwidth * (self.kernel_mul ** i) for i in range(self.kernel_num)]

        loss = 0.0
        for bw in bandwidth_list:
            K_xx = torch.exp(-dxx / bw)
            K_yy = torch.exp(-dyy / bw)
            K_xy = torch.exp(-dxy / bw)
            loss += (K_xx.mean() + K_yy.mean() - 2 * K_xy.mean())

        return loss / len(bandwidth_list)

def mmd_loss_vectorized(X, Y, sigma=1.0):
    """Batched MMD between source and target patch features.

    Applies an exponential kernel exp(-d / (2*sigma^2)) to pairwise
    Euclidean distances within and across the two (B, L, D) feature sets,
    averages each kernel over the batch and first patch axis, and returns
    the mean of the resulting per-patch MMD terms as a scalar.

    Args:
        X: (B, L, D) source-domain features.
        Y: (B, L, D) target-domain features.
    Returns:
        Scalar tensor: average MMD over all patches.
    """
    scale = 2.0 * sigma ** 2

    def kernel(a, b):
        # (B, L, L) kernel matrix for one pair of feature sets.
        return torch.exp(-torch.cdist(a, b) / scale)

    k_ss = kernel(X, X)
    k_tt = kernel(Y, Y)
    k_st = kernel(X, Y)

    per_patch = k_ss.mean(dim=(0, 1)) + k_tt.mean(dim=(0, 1)) - 2.0 * k_st.mean(dim=(0, 1))
    return per_patch.mean()

class PatchAlignLoss(nn.Module):
    """Rank-aligned patch MMD loss.

    Patches in every sample are sorted by descending saliency (L2 norm) so
    that the i-th source patch is compared against the i-th most salient
    target patch; the rank-aligned sets are then scored with
    mmd_loss_vectorized.
    """

    def __init__(self, sigma=2.0):
        super().__init__()
        self.sigma = sigma  # kernel bandwidth forwarded to the MMD helper

    def forward(self, src, tgt):
        batch, num_patches, dim = src.shape

        # Saliency = per-patch feature norm; argsort gives descending order.
        order_src = torch.norm(src, dim=2).argsort(dim=1, descending=True)
        order_tgt = torch.norm(tgt, dim=2).argsort(dim=1, descending=True)

        gather_src = order_src.unsqueeze(2).expand(-1, -1, dim)
        gather_tgt = order_tgt.unsqueeze(2).expand(-1, -1, dim)

        src_ranked = torch.gather(src, 1, gather_src)
        tgt_ranked = torch.gather(tgt, 1, gather_tgt)

        # Batched MMD over all rank-aligned patches.
        return mmd_loss_vectorized(src_ranked, tgt_ranked, self.sigma)
    


class PatchCORALLoss(nn.Module):
    """Class-wise CORAL loss on the most salient feature patches.

    Per sample, the top-k patches by L2 norm are kept; for each (pseudo)
    class the selected patches are averaged over the class's samples, and
    the source/target second-order statistics of these class-mean patches
    are matched with CORAL, weighted by inverse class frequency.
    """

    def __init__(self, num_classes, top_k_ratio=0.75, eps=1e-6):
        super(PatchCORALLoss, self).__init__()
        self.num_classes = num_classes
        self.top_k_ratio = top_k_ratio  # fraction of patches kept per sample
        self.eps = eps                  # small constant guarding divisions by zero

    def forward(self, src_feat, tgt_feat, src_labels, tgt_logits):
        """
        Args:
            src_feat, tgt_feat: [B, L, D] patch features.
            src_labels: [B] ground-truth source labels.
            tgt_logits: [B, C] target logits; pseudo labels are their argmax.
        Returns:
            Scalar class-weighted CORAL loss (0 if no class is shared).
        """
        B, L, D = src_feat.shape
        device = src_feat.device

        # Target pseudo labels from the classifier's prediction.
        tgt_probs = F.softmax(tgt_logits, dim=1)
        tgt_pseudo = tgt_probs.argmax(dim=1)  # [B]
        cls_weights = self._cal_class_weights(src_labels, tgt_pseudo)

        # Keep the top-k most salient patches (saliency = L2 norm).
        k = max(1, int(self.top_k_ratio * L))
        src_topk_idx = torch.topk(torch.norm(src_feat, p=2, dim=2), k=k, dim=1)[1]  # [B, k]
        tgt_topk_idx = torch.topk(torch.norm(tgt_feat, p=2, dim=2), k=k, dim=1)[1]  # [B, k]

        # Gather selected features: [B, k, D]
        src_topk_feats = torch.gather(src_feat, 1, src_topk_idx.unsqueeze(-1).expand(-1, -1, D))
        tgt_topk_feats = torch.gather(tgt_feat, 1, tgt_topk_idx.unsqueeze(-1).expand(-1, -1, D))

        total_loss = 0.0
        valid_classes = 0
        for cls in range(self.num_classes):
            src_mask = (src_labels == cls)
            tgt_mask = (tgt_pseudo == cls)
            # Skip classes absent from either domain in this batch.
            if src_mask.sum() == 0 or tgt_mask.sum() == 0:
                continue

            # Average over the class's samples: [N, k, D] -> [k, D].
            src_cls_feats = torch.mean(src_topk_feats[src_mask], dim=0)
            tgt_cls_feats = torch.mean(tgt_topk_feats[tgt_mask], dim=0)

            total_loss += self.coral_loss(src_cls_feats, tgt_cls_feats) * cls_weights[cls]
            valid_classes += 1

        return total_loss / valid_classes if valid_classes > 0 else torch.tensor(0.0, device=device)

    def _cal_class_weights(self, src_labels, tgt_pseudo):
        """Inverse-frequency class weights (rarer classes weigh more)."""
        device = src_labels.device
        src_np = src_labels.cpu().numpy()
        tgt_np = tgt_pseudo.cpu().numpy()

        # Per-class sample counts in each domain: [C]
        src_cls_counts = np.bincount(src_np, minlength=self.num_classes)
        tgt_cls_counts = np.bincount(tgt_np, minlength=self.num_classes)

        # Count shared by both domains; 0 when a class is missing on one side.
        joint_counts = np.minimum(src_cls_counts, tgt_cls_counts)

        # weight ~ 1 / count, normalized to sum to 1.
        weights = (joint_counts.sum() + self.eps) / (joint_counts + self.eps)
        weights = weights / weights.sum()

        # BUGFIX: follow the labels' device instead of hard-coded .cuda().
        return torch.from_numpy(weights.astype('float32')).to(device)

    def coral_loss(self, source, target):
        """CORrelation ALignment loss.

        Squared Frobenius distance between source and target covariance
        matrices, scaled by 1/(4 d^2).

        Args:
            source: [n_s, D] feature rows (here: k class-mean patches).
            target: [n_t, D] feature rows.
        """
        d = source.size(1)
        ns, nt = source.size(0), target.size(0)

        # Unbiased covariance of centered rows.  BUGFIX: max(..., 1)
        # prevents a division by zero when only one row is present (k == 1).
        xm = torch.mean(source, 0, keepdim=True) - source
        xc = xm.t() @ xm / max(ns - 1, 1)

        xmt = torch.mean(target, 0, keepdim=True) - target
        xct = xmt.t() @ xmt / max(nt - 1, 1)

        # Frobenius norm of the covariance difference.
        loss = torch.sum((xc - xct) ** 2) / (4 * d * d)
        return loss


import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class CrossMatchedPatchLMMDLoss(nn.Module):
    """Class-wise soft cross-matching loss between salient patches.

    For each (pseudo) class, every source patch is softly matched to a
    similarity-weighted mixture of target patches (and vice versa); the
    loss is the mean cosine dissimilarity between each patch and its
    matched mixture, averaged over both directions and weighted by inverse
    class frequency.
    """

    def __init__(self, num_classes, top_k_ratio=0.75, sim_metric='cosine', eps=1e-6):
        super(CrossMatchedPatchLMMDLoss, self).__init__()
        self.num_classes = num_classes
        self.top_k_ratio = top_k_ratio  # fraction of patches kept per sample
        self.sim_metric = sim_metric    # NOTE(review): currently unused — cosine is hard-coded below
        self.eps = eps                  # numerical-stability constant

    def forward(self, src_feat, tgt_feat, src_labels, tgt_logits):
        """
        Args:
            src_feat, tgt_feat: [B, L, D] patch features.
            src_labels: [B] ground-truth source labels.
            tgt_logits: [B, C] target logits; pseudo labels are their argmax.
        Returns:
            Scalar class-weighted cross-matching loss (0 if no shared class).
        """
        B, L, D = src_feat.shape

        # Target pseudo labels from the classifier's prediction.
        tgt_probs = F.softmax(tgt_logits, dim=1)
        tgt_pseudo = tgt_probs.argmax(dim=1)  # [B]

        cls_weights = self._cal_class_weights(src_labels, tgt_pseudo)

        # Per-patch confidence scores: [B, L]
        src_conf = self._calc_patch_confidence(src_feat)
        tgt_conf = self._calc_patch_confidence(tgt_feat)

        k = max(1, int(self.top_k_ratio * L))

        src_topk = self._select_topk_patches(src_feat, src_conf, k)
        tgt_topk = self._select_topk_patches(tgt_feat, tgt_conf, k)

        total_loss = 0.0
        valid_classes = 0

        for cls in range(self.num_classes):
            # Skip classes with negligible weight.
            if cls_weights[cls] < self.eps:
                continue

            src_mask = (src_labels == cls)
            tgt_mask = (tgt_pseudo == cls)
            # Skip classes absent from either domain in this batch.
            if src_mask.sum() == 0 or tgt_mask.sum() == 0:
                continue

            # [Ns, k, D] / [Nt, k, D] -> flat patch sets [Ns*k, D], [Nt*k, D]
            src_patches = src_topk[src_mask].reshape(-1, D)
            tgt_patches = tgt_topk[tgt_mask].reshape(-1, D)

            loss = self._cross_patch_match_loss(src_patches, tgt_patches)
            total_loss += cls_weights[cls] * loss
            valid_classes += 1

        return total_loss / valid_classes if valid_classes > 0 else torch.tensor(0.0, device=src_feat.device)

    def _cal_class_weights(self, src_labels, tgt_pseudo):
        """Inverse-frequency class weights (rarer classes weigh more)."""
        device = src_labels.device
        src_np = src_labels.cpu().numpy()
        tgt_np = tgt_pseudo.cpu().numpy()

        src_counts = np.bincount(src_np, minlength=self.num_classes)
        tgt_counts = np.bincount(tgt_np, minlength=self.num_classes)
        # Count shared by both domains; 0 when a class is missing on one side.
        joint = np.minimum(src_counts, tgt_counts)

        weights = (joint.sum() + self.eps) / (joint + self.eps)
        weights = weights / weights.sum()
        # BUGFIX: follow the labels' device instead of hard-coded CUDA.
        return torch.from_numpy(weights.astype('float32')).to(device)

    def _calc_patch_confidence(self, feat):
        """Patch confidence score: L2 norm of each patch feature -> [B, L]."""
        return torch.norm(feat, dim=2)

    def _select_topk_patches(self, feat, conf, k):
        """Keep the k most confident patches per sample -> [B, k, D]."""
        topk_idx = torch.topk(conf, k=k, dim=1)[1]
        return torch.gather(feat, 1, topk_idx.unsqueeze(-1).expand(-1, -1, feat.size(2)))

    def _cross_patch_match_loss(self, src_patches, tgt_patches, temp=0.1):
        """Bidirectional soft-matching loss between two patch sets.

        Args:
            src_patches: [Ns, D]
            tgt_patches: [Nt, D]
            temp: softmax temperature; lower values make matching harder.
        """
        src_norm = F.normalize(src_patches, dim=1)
        tgt_norm = F.normalize(tgt_patches, dim=1)

        sim_matrix = torch.mm(src_norm, tgt_norm.t())  # [Ns, Nt]

        # Source -> target: match each source patch to a soft mixture of
        # target patches, penalize cosine dissimilarity to the mixture.
        weight_st = F.softmax(sim_matrix / temp, dim=1)       # [Ns, Nt]
        matched_tgt = torch.mm(weight_st, tgt_patches)        # [Ns, D]
        loss_st = 1 - F.cosine_similarity(src_patches, matched_tgt, dim=1).mean()

        # Target -> source: symmetric direction.
        weight_ts = F.softmax(sim_matrix.t() / temp, dim=1)   # [Nt, Ns]
        matched_src = torch.mm(weight_ts, src_patches)        # [Nt, D]
        loss_ts = 1 - F.cosine_similarity(tgt_patches, matched_src, dim=1).mean()

        return 0.5 * (loss_st + loss_ts)

class AdaptivePatchWeightLoss(nn.Module):
    """Class-wise MMD on learned-importance patches with a learned projector.

    A small MLP scores every patch's importance; the top-k patches per
    sample are projected through a learned "domain-invariant" network, and
    per (pseudo) class the projected features are aligned across domains
    with a Gaussian-kernel MMD over per-sample patch means.
    """

    def __init__(self, num_classes, feature_dim, top_k_ratio=0.75, eps=1e-6):
        super(AdaptivePatchWeightLoss, self).__init__()
        self.num_classes = num_classes
        self.feature_dim = feature_dim
        self.top_k_ratio = top_k_ratio  # fraction of patches kept per sample
        self.eps = eps                  # numerical-stability constant

        # Scores each patch feature with an importance in (0, 1).
        self.patch_importance_net = nn.Sequential(
            nn.Linear(feature_dim, feature_dim // 2),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(feature_dim // 2, 1),
            nn.Sigmoid()
        )

        # Learned projector intended to strip domain-specific components.
        self.domain_invariant_net = nn.Sequential(
            nn.Linear(feature_dim, feature_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(feature_dim, feature_dim)
        )

    def forward(self, src_feat, tgt_feat, src_labels, tgt_logits):
        """
        Args:
            src_feat, tgt_feat: [B, L, D] patch features.
            src_labels: [B] ground-truth source labels.
            tgt_logits: [B, C] target logits; pseudo labels are their argmax.
        Returns:
            Scalar class-weighted MMD loss (0 if no class is shared).
        """
        B, L, D = src_feat.shape

        # Target pseudo labels from the classifier's prediction.
        tgt_probs = F.softmax(tgt_logits, dim=1)
        tgt_pseudo = tgt_probs.argmax(dim=1)

        cls_weights = self._cal_class_weights(src_labels, tgt_pseudo)

        # Learned per-patch importance in (0, 1): [B, L]
        src_importance = self.patch_importance_net(src_feat.reshape(-1, D)).reshape(B, L)
        tgt_importance = self.patch_importance_net(tgt_feat.reshape(-1, D)).reshape(B, L)

        # Keep the k most important patches per sample.
        k = max(1, int(self.top_k_ratio * L))
        src_topk = self._select_weighted_patches(src_feat, src_importance, k)
        tgt_topk = self._select_weighted_patches(tgt_feat, tgt_importance, k)

        # Project into the learned (intended domain-invariant) space.
        src_invariant = self.domain_invariant_net(src_topk.reshape(-1, D)).reshape(src_topk.shape)
        tgt_invariant = self.domain_invariant_net(tgt_topk.reshape(-1, D)).reshape(tgt_topk.shape)

        total_loss = 0.0
        valid_classes = 0

        for cls in range(self.num_classes):
            # Skip classes with negligible weight.
            if cls_weights[cls] < self.eps:
                continue

            src_mask = (src_labels == cls)
            tgt_mask = (tgt_pseudo == cls)
            # Skip classes absent from either domain in this batch.
            if src_mask.sum() == 0 or tgt_mask.sum() == 0:
                continue

            src_cls = src_invariant[src_mask]  # [Ns, k, D]
            tgt_cls = tgt_invariant[tgt_mask]  # [Nt, k, D]

            loss = self._weighted_mmd_loss(src_cls, tgt_cls)
            total_loss += cls_weights[cls] * loss
            valid_classes += 1

        return total_loss / valid_classes if valid_classes > 0 else torch.tensor(0.0, device=src_feat.device)

    def _cal_class_weights(self, src_labels, tgt_pseudo):
        """Inverse-frequency class weights (rarer classes weigh more)."""
        device = src_labels.device
        src_np = src_labels.cpu().numpy()
        tgt_np = tgt_pseudo.cpu().numpy()

        src_counts = np.bincount(src_np, minlength=self.num_classes)
        tgt_counts = np.bincount(tgt_np, minlength=self.num_classes)
        # Count shared by both domains; 0 when a class is missing on one side.
        joint = np.minimum(src_counts, tgt_counts)

        weights = (joint.sum() + self.eps) / (joint + self.eps)
        weights = weights / weights.sum()
        # BUGFIX: follow the labels' device instead of hard-coded CUDA.
        return torch.from_numpy(weights.astype('float32')).to(device)

    def _select_weighted_patches(self, feat, importance, k):
        """Keep the k patches with the highest learned importance -> [B, k, D]."""
        topk_idx = torch.topk(importance, k=k, dim=1)[1]
        return torch.gather(feat, 1, topk_idx.unsqueeze(-1).expand(-1, -1, feat.size(2)))

    def _weighted_mmd_loss(self, src_feats, tgt_feats, sigma=1.0):
        """Simplified MMD between per-sample patch means.

        NOTE(review): torch.cdist returns the (unsquared) Euclidean
        distance, so the kernel is exp(-d / 2*sigma^2) rather than the
        classic Gaussian exp(-d^2 / 2*sigma^2) — kept as-is to preserve
        behavior; confirm whether the squared form was intended.
        """
        src_mean = src_feats.mean(dim=1)  # [Ns, D]
        tgt_mean = tgt_feats.mean(dim=1)  # [Nt, D]

        src_src = torch.exp(-torch.cdist(src_mean, src_mean) / (2 * sigma**2))
        tgt_tgt = torch.exp(-torch.cdist(tgt_mean, tgt_mean) / (2 * sigma**2))
        src_tgt = torch.exp(-torch.cdist(src_mean, tgt_mean) / (2 * sigma**2))

        mmd = src_src.mean() + tgt_tgt.mean() - 2 * src_tgt.mean()
        return mmd
