from Models.models.irpe import build_rpe, get_rpe_config
from vit_pytorch.vit import Attention

import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, repeat
from einops.layers.torch import Rearrange

class RPEAttention(Attention):
    '''
    Multi-head self-attention with image relative position encoding (iRPE).

    Extends vit_pytorch's ``Attention`` by injecting learned relative
    position biases on queries, keys, and/or values, as produced by
    ``build_rpe``. Optionally adds a residual connection around the whole
    attention operation.
    '''

    def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., rpe_config=None, use_res=True):
        '''
        Args:
            dim: input/output embedding dimension.
            heads: number of attention heads.
            dim_head: per-head dimension.
            dropout: dropout rate forwarded to the parent ``Attention``.
            rpe_config: configuration consumed by ``build_rpe``; its
                ``rpe_on`` setting controls which of rpe_q / rpe_k / rpe_v
                are created (any of them may come back as None).
            use_res: if True, return ``x + attention(x)`` instead of the
                raw attention output.
        '''
        super().__init__(dim, heads=heads, dim_head=dim_head, dropout=dropout)
        self.use_res = use_res

        # Relative position encoding modules for queries, keys, and values.
        # Each is an nn.Module or None depending on rpe_config.
        self.rpe_q, self.rpe_k, self.rpe_v = \
            build_rpe(rpe_config,
                      head_dim=dim_head,
                      num_heads=heads)

    def forward(self, x):
        '''
        Args:
            x: tensor of shape (batch, tokens, dim).

        Returns:
            Tensor of shape (batch, tokens, dim); includes a residual
            connection when ``self.use_res`` is True.
        '''
        b, n, _, h = *x.shape, self.heads
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        # Scaled dot-product attention scores: (b, h, n, n).
        attn = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale

        # Image relative position bias on keys: f(q) added to the scores.
        if self.rpe_k is not None:
            attn += self.rpe_k(q)

        # Image relative position bias on queries: computed from the scaled
        # keys, then transposed so the bias aligns with the (i, j) score axes.
        if self.rpe_q is not None:
            attn += self.rpe_q(k * self.scale).transpose(2, 3)

        attn = attn.softmax(dim=-1)
        # NOTE(review): the parent Attention's forward applies dropout to the
        # attention weights here; this override skips it, so `dropout` only
        # affects the parent's to_out projection — confirm this is intended.

        out = einsum('b h i j, b h j d -> b h i d', attn, v)

        # Image relative position bias on values, computed from the
        # post-softmax attention weights.
        if self.rpe_v is not None:
            out += self.rpe_v(attn)

        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        # Residual connection around the whole attention block.
        if self.use_res:
            out = x + out
        return out

if __name__ == "__main__":
    # Smoke test: run a dummy batch through an RPEAttention layer with
    # relative position encoding enabled on q, k, and v.
    rpe_cfg = get_rpe_config(skip=0, ratio=2.4, rpe_on='qkv')
    layer = RPEAttention(640, rpe_config=rpe_cfg)
    dummy = torch.randn((16, 25, 640))
    print(layer(dummy).shape)