import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF

def intra_patch_shuffle(feat, logits, top_percent=0.25):
    """Saliency-guided intra-sample patch shuffle augmentation.

    The most salient ``top_percent`` of spatial positions ("patches") are
    kept in place; the remaining positions are randomly permuted *within
    each sample*. Saliency is the gradient-times-activation attribution of
    a probability-weighted logit sum; when no gradient path from ``logits``
    back to ``feat`` exists (e.g. ``feat`` was produced outside the graph),
    activation magnitude is used as a fallback instead of raising.

    Args:
        feat: Input feature map ``[B, C, H, W]``. If it participates in the
            autograd graph that produced ``logits``, gradients drive the
            saliency estimate; otherwise the magnitude fallback is used.
        logits: Class scores ``[B, num_classes]`` corresponding to ``feat``.
        top_percent: Fraction of the most salient patches to keep fixed;
            the rest are shuffled. Default 0.25.

    Returns:
        Detached augmented feature map ``[B, C, H, W]``.
    """
    B, C, H, W = feat.shape
    num_patch = H * W
    k = int(num_patch * top_percent)

    # --- saliency estimation -------------------------------------------
    # Only attempt a gradient if feat already requires grad: calling
    # requires_grad_(True) here (as a post-hoc flip) raises on non-leaf
    # tensors and cannot create a graph edge to logits anyway.
    grad = None
    if feat.requires_grad:
        probs = F.softmax(logits, dim=1)
        # Probability-weighted logit sum, summed over the batch.
        loss = (probs * logits).sum()
        try:
            grad = torch.autograd.grad(
                loss, feat, retain_graph=True, allow_unused=True
            )[0]
        except RuntimeError:
            # feat is not part of the graph that produced logits.
            grad = None

    feat_d = feat.detach()
    if grad is not None:
        # grad * activation: standard input-attribution saliency.
        saliency = (grad.detach() * feat_d).sum(dim=1)  # [B, H, W]
    else:
        # No gradient available -> fall back to activation magnitude.
        saliency = feat_d.abs().sum(dim=1)  # [B, H, W]
    sal_flat = saliency.reshape(B, -1)  # [B, HW]

    # Positions NOT in the top-k salient set get shuffled; salient
    # positions are simply never touched below, so no "keep" pass needed.
    _, shuffle_idx = torch.topk(sal_flat, k=num_patch - k, dim=1, largest=False)

    # --- per-sample shuffle of the non-salient patches -----------------
    feat_flat = feat_d.reshape(B, C, -1).transpose(1, 2).contiguous()  # [B, HW, C]
    out = feat_flat.clone()
    for i in range(B):
        idx = shuffle_idx[i]
        # Permutation indices on the same device as the feature tensor.
        perm = torch.randperm(idx.numel(), device=idx.device)
        out[i, idx] = feat_flat[i, idx[perm]]

    return out.transpose(1, 2).reshape(B, C, H, W)
