import torch
from torch import nn
import math
from .ffn import StarFFN

class CrossSuperPixelAggregation(nn.Module):
    """Bidirectional sparse cross-attention between pixels and superpixel tokens.

    Stage 1 aggregates pixel features into superpixel tokens
    (superpixel <- pixel attention); stage 2 redistributes the updated
    tokens back to the pixel grid (pixel <- superpixel attention).
    Both attention maps are sparsified to their top-k scores per query
    before the softmax, so each query attends to at most ``topk`` keys.

    NOTE(review): ``self.ln1 = nn.LayerNorm(hidden_dim)`` is applied to the
    raw pixel features, so the input channel count ``c`` must equal
    ``hidden_dim`` — the lazy projections would accept any width, but the
    LayerNorm will not.
    """

    def __init__(self, hidden_dim, num_heads=1):
        super(CrossSuperPixelAggregation, self).__init__()
        # Lazy projections: input feature width is inferred on first call.
        self.vx_projection = nn.LazyLinear(out_features=hidden_dim, bias=True)
        self.kx_projection = nn.LazyLinear(out_features=hidden_dim, bias=True)
        self.qs_projection = nn.LazyLinear(out_features=hidden_dim, bias=True)
        self.qx_projection = nn.LazyLinear(out_features=hidden_dim, bias=True)

        # Feed-forward blocks applied after each attention stage.
        self.ffn1, self.ffn2 = StarFFN(hidden_dim), StarFFN(hidden_dim)

        # TODO: batch normalization might be used instead.
        self.ln1 = nn.LayerNorm(hidden_dim)

        # ---------- saved hyper-parameters ----------
        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads
        assert self.head_dim * num_heads == hidden_dim, "hidden_dim must be divisible by num_heads"

    def split_heads(self, x):
        """Reshape (b, seq, hidden) -> (b, heads, seq, head_dim)."""
        batch_size, seq_length, _ = x.size()
        return x.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

    def combine_heads(self, x):
        """Reshape (b, heads, seq, head_dim) -> (b, seq, hidden)."""
        batch_size, _, seq_length, _ = x.size()
        return x.transpose(1, 2).contiguous().view(batch_size, seq_length, self.hidden_dim)

    @staticmethod
    def _sparse_softmax(scores, topk):
        """Keep only the top-k scores per query and softmax over them.

        Non-selected positions are set to -inf and thus get zero probability,
        so the softmax normalizes over the selected top-k entries only.
        k is clamped to the key count so a ``topk`` larger than the last
        dimension (e.g. few superpixels) cannot crash ``torch.topk``.
        """
        k = min(topk, scores.size(-1))
        topk_values, topk_indices = torch.topk(scores, k=k, dim=-1)
        mask = torch.full_like(scores, float('-inf'))
        mask.scatter_(-1, topk_indices, topk_values)
        return torch.softmax(mask, dim=-1)

    def forward(self, x, s, topk=16):
        """Run both attention stages.

        Args:
            x: pixel feature map, shape (b, c, h, w); c must equal
               ``hidden_dim`` (enforced by the LayerNorm, see class note).
            s: superpixel tokens, shape (b, m, c_s).
            topk: maximum number of keys each query attends to
                  (clamped to the available key count).

        Returns:
            Updated pixel feature map of shape (b, hidden_dim, h, w).
        """
        batch_size = x.size(0)
        h, w = x.size(2), x.size(3)

        # (b, c, h, w) -> (b, hw, c), then normalize each pixel feature.
        x = x.reshape(batch_size, x.size(1), -1)
        x = torch.permute(x, dims=[0, 2, 1])  # (b, hw, c)
        x = self.ln1(x)

        # Projections, split into heads.
        vx = self.split_heads(self.vx_projection(x))  # (b, heads, hw, d)
        kx = self.split_heads(self.kx_projection(x))  # (b, heads, hw, d)
        qs = self.split_heads(self.qs_projection(s))  # (b, heads, m, d)
        qx = self.split_heads(self.qx_projection(x))  # (b, heads, hw, d)

        # Stage 1: superpixel <- pixel (sparse scaled dot-product attention).
        attn1 = torch.matmul(qs, kx.transpose(-1, -2)) / math.sqrt(self.head_dim)  # (b, heads, m, hw)
        attn1 = self._sparse_softmax(attn1, topk)

        s_result = self.combine_heads(torch.matmul(attn1, vx))  # (b, m, hidden)
        s_result = self.ffn1(s_result)

        # Stage 2: pixel <- superpixel. Note qs doubles as the key here
        # (queries attend against the superpixel queries, not a new K).
        attn2 = torch.matmul(qx, qs.transpose(-1, -2)) / math.sqrt(self.head_dim)  # (b, heads, hw, m)
        attn2 = self._sparse_softmax(attn2, topk)

        s_result_h = self.split_heads(s_result)  # (b, heads, m, d)
        x_result = self.combine_heads(torch.matmul(attn2, s_result_h))  # (b, hw, hidden)
        x_result = self.ffn2(x_result)

        # (b, hw, hidden) -> (b, hidden, h, w). Use hidden_dim — not the
        # input channel count — since combine_heads/ffn2 emit hidden_dim
        # features (they coincide today only because ln1 forces c == hidden_dim).
        x_result = x_result.reshape(batch_size, h, w, self.hidden_dim)
        x_result = x_result.permute(0, 3, 1, 2).contiguous()
        return x_result



if __name__ == '__main__':
    # Smoke test: batch of 8 feature maps, 48 channels, 16 superpixel tokens.
    bs, height, width = 8, 224, 224
    n_channels, n_superpixels = 48, 16

    pixel_feats = torch.rand(size=[bs, n_channels, height, width], dtype=torch.float32)
    superpixel_feats = torch.rand(size=[bs, n_superpixels, n_channels], dtype=torch.float32)

    aggregator = CrossSuperPixelAggregation(hidden_dim=n_channels, num_heads=4)
    out = aggregator(pixel_feats, superpixel_feats)
    print(out.shape)