import torch
from torch import nn
from xformers.ops import memory_efficient_attention
from .ffn import StarFFN

class CrossSuperPixelAggregation(nn.Module):
    """Bidirectional cross-attention between pixel features and superpixel tokens.

    Stage 1 (superpixel <- pixel): superpixel tokens attend over the flattened
    pixel features and are refined by a StarFFN.
    Stage 2 (pixel <- superpixel): pixel features attend over the refined
    superpixel tokens and are refined by a second StarFFN.

    Attention is computed with xformers' ``memory_efficient_attention``, which
    expects tensors laid out as ``(batch, seq, heads, head_dim)``.
    """

    def __init__(self, hidden_dim, num_heads=1):
        """
        Args:
            hidden_dim: channel dimension shared by pixels and superpixels;
                must be divisible by ``num_heads``.
            num_heads: number of attention heads.
        """
        super().__init__()
        # Value/key projections come from pixels; queries from both streams.
        self.vx_projection = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.kx_projection = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.qs_projection = nn.Linear(hidden_dim, hidden_dim, bias=True)
        self.qx_projection = nn.Linear(hidden_dim, hidden_dim, bias=True)

        # One feed-forward block per attention stage.
        self.ffn1 = StarFFN(hidden_dim)
        self.ffn2 = StarFFN(hidden_dim)

        # Pre-norm applied to the flattened pixel sequence only.
        self.ln1 = nn.LayerNorm(hidden_dim)

        self.hidden_dim = hidden_dim
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads
        assert self.head_dim * num_heads == hidden_dim, "hidden_dim must be divisible by num_heads"

    def split_heads(self, x):
        """Reshape (b, seq, hidden_dim) -> (b, seq, num_heads, head_dim)."""
        b, seq, dim = x.size()
        return x.view(b, seq, self.num_heads, self.head_dim)

    def combine_heads(self, x):
        """Reshape (b, seq, num_heads, head_dim) -> (b, seq, hidden_dim)."""
        b, seq, heads, d = x.size()
        return x.contiguous().view(b, seq, heads * d)

    def forward(self, x, s, **kwargs):
        """Run one bidirectional aggregation step.

        Args:
            x: pixel feature map of shape (b, c, h, w), with c == hidden_dim.
            s: superpixel tokens of shape (b, m, c).
            **kwargs: ignored; accepted for call-site compatibility.

        Returns:
            Updated pixel features of shape (b, c, h, w).
        """
        batch_size = x.size(0)
        num_channels = x.size(1)
        h, w = x.size(2), x.size(3)

        # Flatten spatial dims into a token sequence and pre-normalize: (b, hw, c).
        x = x.view(batch_size, num_channels, -1).permute(0, 2, 1)
        x = self.ln1(x)

        # Linear projections
        vx = self.vx_projection(x)  # (b, hw, c)
        kx = self.kx_projection(x)  # (b, hw, c)
        qs = self.qs_projection(s)  # (b, m, c)
        qx = self.qx_projection(x)  # (b, hw, c)

        # Split heads into the (b, seq, heads, head_dim) layout xformers expects.
        vx = self.split_heads(vx)  # (b, hw, heads, d)
        kx = self.split_heads(kx)
        qs = self.split_heads(qs)  # (b, m, heads, d)
        qx = self.split_heads(qx)

        # --- Cross attention: superpixel <- pixel ---
        attn1_out = memory_efficient_attention(
            query=qs, key=kx, value=vx
        )  # (b, m, heads, d)
        s_result = self.combine_heads(attn1_out)  # (b, m, c)
        s_result = self.ffn1(s_result)  # (b, m, c)

        # --- Cross attention: pixel <- superpixel ---
        # NOTE(review): the superpixel queries `qs` double as the keys here
        # (rather than a dedicated key projection of s_result) — presumably a
        # deliberate weight-sharing choice; confirm against the reference design.
        s_result_h = self.split_heads(s_result)  # (b, m, heads, d)
        attn2_out = memory_efficient_attention(
            query=qx, key=qs, value=s_result_h
        )  # (b, hw, heads, d)
        x_result = self.combine_heads(attn2_out)  # (b, hw, c)
        x_result = self.ffn2(x_result)

        # Restore spatial layout: (b, hw, c) -> (b, c, h, w).
        x_result = x_result.view(batch_size, h, w, num_channels).permute(0, 3, 1, 2).contiguous()
        return x_result


if __name__ == '__main__':
    # Smoke test: one forward pass on random data, printing the output shape.
    # Fall back to CPU when CUDA is unavailable instead of crashing outright
    # (the xformers attention kernel itself may still require a GPU).
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    batch_size = 8
    h, w = 224, 224
    channels = 48
    m = 16
    x = torch.rand(size=[batch_size, channels, h, w], dtype=torch.float32, device=device)
    s = torch.rand(size=[batch_size, m, channels], dtype=torch.float32, device=device)

    model = CrossSuperPixelAggregation(hidden_dim=channels, num_heads=4)
    model.to(device)
    with torch.no_grad():  # inference-only check; skip autograd bookkeeping
        result = model(x, s)
    print(result.shape)
