import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionHead(nn.Module):
    """Project an input sequence into query, key and value tensors.

    Three independent linear maps share the same input; the result is
    returned as a mutable list ``[q, k, v]`` (callers rely on being able
    to swap elements of this list, so it must stay a list, not a tuple).
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        # NOTE: attribute names ("liner_*") are kept as-is so existing
        # state_dict checkpoints keep loading.
        self.liner_Wq = nn.Linear(in_channels, out_channels)
        self.liner_Wk = nn.Linear(in_channels, out_channels)
        self.liner_Wv = nn.Linear(in_channels, out_channels)

    def forward(self, x):
        # Apply each projection to the same input: [Q, K, V].
        projections = (self.liner_Wq, self.liner_Wk, self.liner_Wv)
        return [proj(x) for proj in projections]

def AttentionCore(Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor) -> torch.Tensor:
    """Scaled dot-product attention over batched sequences.

    Args:
        Q: queries, shape (batch, n_q, d).
        K: keys, shape (batch, n_k, d).
        V: values, shape (batch, n_k, d_v).

    Returns:
        Attended values, shape (batch, n_q, d_v).
    """
    # Similarity scores (batch, n_q, n_k); scale by sqrt(d) so the
    # softmax does not saturate for large key dimensions.
    scores = torch.bmm(Q, K.transpose(1, 2)) / (Q.size(-1) ** 0.5)
    # BUG FIX: normalize over the key axis (last dim). The original used
    # dim=1, which normalized across queries instead of keys.
    weights = F.softmax(scores, dim=-1)
    return torch.bmm(weights, V)


class SelfAttention(nn.Module):
    """Single-head self-attention across the spatial positions of a feature map.

    Input:  (b, in_channels, h, w).
    Output: (b, out_channels, h, w) — each spatial position attends to all
    positions of the same image.
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.in_channels = in_channels
        # BUG FIX: remember the output width; the original reshaped the
        # result back with the *input* channel count, which crashed
        # whenever out_channels != in_channels.
        self.out_channels = out_channels
        self.head = AttentionHead(in_channels, out_channels)

    def forward(self, x):
        b, c, h, w = x.size()
        # Flatten spatial dims into a token axis: (b, c, h, w) -> (b, h*w, c).
        tokens = x.permute(0, 2, 3, 1).reshape(b, h * w, self.in_channels).float()
        attended = AttentionCore(*self.head(tokens))
        # Back to image layout using the projected channel count.
        return attended.permute(0, 2, 1).reshape(b, self.out_channels, h, w).float()


class CrossAttention(nn.Module):
    """Two-stream cross-attention.

    Each stream's queries attend to the *other* stream's keys/values, and
    each cross-attended map is then refined with self-attention.
    Assumes both feature maps share the same spatial size — TODO confirm
    with callers (the reshape back to (h, w) requires it).
    """

    def __init__(self, in_channels: int, hidden_channels: int, out_channels: int):
        super().__init__()
        self.in_channels = in_channels
        # BUG FIX companion: keep hidden width for the reshape in forward()
        # instead of hard-coding the input channel count.
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels

        self.head1 = AttentionHead(in_channels, hidden_channels)
        self.head2 = AttentionHead(in_channels, hidden_channels)

        self.align1 = SelfAttention(hidden_channels, out_channels)
        self.align2 = SelfAttention(hidden_channels, out_channels)

    def forward(self, x):
        # BUG FIX: read the two feature maps without mutating the caller's
        # list (the original assigned back into x[0]/x[1], a visible
        # side effect on the argument).
        feat1, feat2 = x[0], x[1]
        b1, c1, h1, w1 = feat1.size()
        b2, c2, h2, w2 = feat2.size()

        tokens1 = feat1.permute(0, 2, 3, 1).reshape(b1, h1 * w1, self.in_channels).float()
        tokens2 = feat2.permute(0, 2, 3, 1).reshape(b2, h2 * w2, self.in_channels).float()

        q1, k1, v1 = self.head1(tokens1)
        q2, k2, v2 = self.head2(tokens2)

        # Cross-attention: swap the queries between the two streams.
        output1 = AttentionCore(q2, k1, v1)
        output2 = AttentionCore(q1, k2, v2)

        # Back to image layout with the hidden channel count (the original
        # hard-coded c1/c2, which only worked when hidden == in_channels).
        output1 = output1.permute(0, 2, 1).reshape(b1, self.hidden_channels, h1, w1).float()
        output2 = output2.permute(0, 2, 1).reshape(b2, self.hidden_channels, h2, w2).float()

        return self.align1(output1), self.align2(output2)



if __name__ == '__main__':
    # Smoke test: nn.Linear maps the last dimension only.
    l = nn.Linear(3, 4)
    x = torch.randn((3, 10, 10, 3))
    print(l(x))

    width = 14
    height = 14
    batch_size = 2
    channel = 256

    # BUG FIX: don't hard-require CUDA — fall back to CPU so the script
    # also runs on machines without a GPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    numel = batch_size * channel * width * height
    t = torch.arange(numel).reshape(-1, channel, height, width).to(device)
    t2 = torch.arange(numel).reshape(-1, channel, height, width).to(device)
    testModule = CrossAttention(channel, channel, channel).to(device)
    print(testModule([t, t2])[0].size())



