import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange

def scatter_mean(tgt, dim, indices, src):
    """
    Scatter values from `src` into `tgt` along `dim` and average per position.

    Positions in `tgt` that receive k scattered values end up holding the
    mean of (original value + those k values); untouched positions keep
    their original value. Note the occurrence counter starts at one for
    every position, so `tgt`'s existing entry counts as one observation.

    Args:
        tgt (Tensor): Target tensor to aggregate into, e.g. [B, C, H * W].
        dim (int): Dimension along which to scatter.
        indices (Tensor): Scatter destinations along `dim`, same shape as `src`.
        src (Tensor): Values to scatter, e.g. [B, C, H * W].

    Returns:
        Tensor: Per-position averaged result (new tensor; `tgt` is untouched).
    """
    # Out-of-place scatter-add: original values plus everything routed here.
    summed = tgt.scatter_add(dim, indices, src)
    # Occurrence counts, seeded at 1 so the original tgt entry is included.
    counts = torch.ones_like(tgt).scatter_add(dim, indices, torch.ones_like(src))
    return summed / counts


class SPIntraAttModule(nn.Module):
    """Superpixel intra-attention: multi-head self-attention restricted to the
    top-k pixels of each superpixel, with results scattered back onto the
    pixel grid (averaged with the value projection)."""

    def __init__(self, dim, num_heads, qk_dim, topk=32, qkv_bias=False, qk_scale=None, attn_drop=0.):
        super().__init__()
        self.dim = dim
        self.qk_dim = qk_dim
        self.num_heads = num_heads
        self.topk = topk  # number of top-k pixels selected per superpixel

        # 1x1 convolutions act as per-pixel linear projections for Q, K, V.
        self.q = nn.Conv2d(dim, qk_dim, 1, bias=qkv_bias)
        self.k = nn.Conv2d(dim, qk_dim, 1, bias=qkv_bias)
        self.v = nn.Conv2d(dim, dim, 1, bias=qkv_bias)

        # NOTE(review): unused — the corresponding call in forward() is
        # commented out (and LayerNorm(dim) would not apply to (B, C, H, W)
        # input as written anyway).
        self.norm = nn.LayerNorm(dim)

        self.attn_drop = nn.Dropout(attn_drop)
        head_dim = self.qk_dim // self.num_heads
        self.scale = qk_scale or head_dim ** -0.5

    def forward(self, x, affinity_matrix, num_spixels):
        """
        Input:
            x: tensor of shape (B, C, H, W) - input feature map
            affinity_matrix: tensor of shape (B, M, H*W) - affinity between
                each superpixel (dim 1) and each pixel (dim 2); top-k is taken
                over the last (pixel) dimension
            num_spixels: number of superpixels (M)

        Output:
            x_out: tensor of shape (B, C, H, W) - the output feature map
        """

        B, C, H, W = x.shape

        # Step 1: Normalize input (disabled; see NOTE on self.norm in __init__)
        #x = self.norm(x)

        # Step 2: Calculate query, key, and value (Q, K, V) projections
        q, k, v = self.q(x), self.k(x), self.v(x)

        # Step 3: For each superpixel, pick the top-k pixels by affinity.
        _, indices = torch.topk(affinity_matrix, self.topk, dim=-1)  # B, M, topk

        # Gather the top-k pixel features from Q, K, V per superpixel.
        # Each projection is flattened to (B, 1, C', H*W), broadcast over the
        # M superpixels, then gathered along the pixel axis -> (B, M, C', topk).
        q_sp_pixels = torch.gather(q.reshape(B, 1, -1, H * W).expand(-1, num_spixels, -1, -1), -1, indices.unsqueeze(2).expand(-1, -1, self.qk_dim, -1))
        k_sp_pixels = torch.gather(k.reshape(B, 1, -1, H * W).expand(-1, num_spixels, -1, -1), -1, indices.unsqueeze(2).expand(-1, -1, self.qk_dim, -1))
        v_sp_pixels = torch.gather(v.reshape(B, 1, -1, H * W).expand(-1, num_spixels, -1, -1), -1, indices.unsqueeze(2).expand(-1, -1, self.dim, -1))

        # Step 4: Split channels into heads: (B, M, h*c, topk) -> (B, M, h, topk, c)
        q_sp_pixels, k_sp_pixels, v_sp_pixels = map(lambda t: rearrange(t, 'b k (h c) t -> b k h t c', h=self.num_heads), (q_sp_pixels, k_sp_pixels, v_sp_pixels))

        # Step 5: Scaled dot-product attention within each superpixel's top-k set
        attn = (q_sp_pixels @ k_sp_pixels.transpose(-2, -1)) * self.scale  # B, M, H, topk, topk
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # Step 6: Apply attention to get the output
        out = attn @ v_sp_pixels  # B, M, H, topk, C
        # Merge heads back into channels and flatten (superpixel, topk) into
        # one axis of pixel slots: (B, dim, M*topk)
        out = rearrange(out, 'b m h t c -> b (h c) (m t)')

        # Step 7: Scatter the attended features back to the pixel grid.
        # The target is the value map v, so unselected pixels keep v, and a
        # pixel selected by one or more superpixels gets the average of v and
        # its attended feature(s) (scatter_mean's count starts at 1).
        out = scatter_mean(v.reshape(B, self.dim, H * W), -1, indices.reshape(B, 1, -1).expand(-1, self.dim, -1), out)
        out = out.reshape(B, C, H, W)

        return out

if __name__=='__main__':
    # Instantiate the module with the SPIN authors' default configuration.
    model = SPIntraAttModule(
        dim=40,       # feature dimension (matches the authors' SPIN model)
        num_heads=1,  # single attention head (authors' default)
        qk_dim=24,    # Q/K projection dimension (d=24 in the paper)
        topk=144,     # initial superpixel size is 12x12, so topk = 12*12 = 144
    )

    # Example inputs.
    batch, channels, height, width = 2, 40, 48, 48
    num_spixels = 100  # assumed number of superpixels
    feats = torch.randn(batch, channels, height, width)
    # Pixel-superpixel affinity matrix (would come from the SPA module).
    affinity = torch.randn(batch, num_spixels, height * width)

    # Forward pass: output has shape (B, C, H, W).
    result = model(feats, affinity, num_spixels)
    print(result.shape)