import torch
from torch import nn
import math
from .ffn import StarFFN


class LocalAttention(nn.Module):
    """Patch-based self-attention over an image feature map.

    The input map is unfolded into patches by strided convolutions (one each
    for Q, K and V); multi-head scaled dot-product attention is then computed
    across all patches, followed by LayerNorm and a feed-forward block.

    Args:
        patch_size: side length of each square patch.
        overlap: pixels shared by adjacent patches (conv stride is
            ``patch_size - overlap``).
        input_dim: number of channels of the input feature map.
        num_heads: number of attention heads; must divide
            ``input_dim * patch_size**2``.
        max_patches: capacity of the learned positional-encoding table
            (default 256, matching the previous hard-coded limit).
    """

    def __init__(self, patch_size, overlap, input_dim, num_heads, max_patches=256):
        super().__init__()

        # Each patch is flattened into one token of this dimension.
        hidden_dim = input_dim * (patch_size ** 2)
        if hidden_dim % num_heads != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by num_heads ({num_heads})"
            )
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.head_dim = hidden_dim // num_heads

        # Q/K/V projections as strided convolutions: each conv extracts the
        # patches and projects them to hidden_dim in a single op. Eager
        # Conv2d (instead of LazyConv2d) actually uses input_dim, so a wrong
        # channel count fails fast at construction time.
        stride = patch_size - overlap
        self.q_project = nn.Conv2d(input_dim, hidden_dim, kernel_size=patch_size, stride=stride, bias=False)
        self.k_project = nn.Conv2d(input_dim, hidden_dim, kernel_size=patch_size, stride=stride, bias=False)
        self.v_project = nn.Conv2d(input_dim, hidden_dim, kernel_size=patch_size, stride=stride, bias=False)

        # Position-wise feed-forward applied to the attention output.
        self.ffn = StarFFN(hidden_dim)
        # Token normalization applied before the FFN (was previously
        # constructed but never used in forward).
        self.ln = nn.LayerNorm(hidden_dim)

        # Learned positional encoding, one vector per patch position.
        self.positional_encoding = nn.Parameter(torch.zeros(1, max_patches, hidden_dim))

    def forward(self, x):
        """Apply patch attention to ``x`` of shape (b, c, h, w).

        Returns a tensor reshaped to (b, c, h, w).

        Raises:
            ValueError: if the number of patches exceeds the positional
                encoding capacity (``max_patches``).
        """
        b, c, h, w = x.shape

        # Project to Q/K/V and flatten the patch grid: (b, n_patches, hidden_dim).
        q = self.q_project(x).flatten(2).transpose(1, 2)
        k = self.k_project(x).flatten(2).transpose(1, 2)
        v = self.v_project(x).flatten(2).transpose(1, 2)

        n_patches = q.size(1)
        if n_patches > self.positional_encoding.size(1):
            raise ValueError(
                f"{n_patches} patches exceed positional-encoding capacity "
                f"{self.positional_encoding.size(1)}; increase max_patches"
            )
        # Learned positional information is added to queries and keys only,
        # matching the original design (values stay position-free).
        pos = self.positional_encoding[:, :n_patches, :]
        q = q + pos
        k = k + pos

        # Split into heads: (b, num_heads, n_patches, head_dim). num_heads
        # was previously stored but never used — attention ran single-headed.
        def split_heads(t):
            return t.reshape(b, n_patches, self.num_heads, self.head_dim).transpose(1, 2)

        q, k, v = split_heads(q), split_heads(k), split_heads(v)

        # Scaled dot-product attention per head; scale by sqrt(head_dim),
        # the dimension actually dotted per head.
        attn = torch.softmax(q @ k.transpose(-2, -1) / math.sqrt(self.head_dim), dim=-1)
        out = attn @ v

        # Merge heads back: (b, n_patches, hidden_dim).
        out = out.transpose(1, 2).reshape(b, n_patches, self.hidden_dim)

        # LayerNorm then FFN.
        out = self.ffn(self.ln(out))

        # Fold tokens back to the input shape. NOTE(review): this is only
        # element-count-correct when overlap == 0, and it does not spatially
        # invert the patch flattening — confirm downstream consumers rely
        # only on the (b, c, h, w) shape, not on spatial alignment.
        return out.reshape(b, c, h, w)
        
if __name__ == "__main__":
    # Quick smoke test: 8 channels, 144x144 map, 12x12 non-overlapping patches.
    demo_input = torch.rand(8, 8, 144, 144)
    attention = LocalAttention(patch_size=12, overlap=0, input_dim=8, num_heads=3)
    output = attention(demo_input)
    print(output.shape)