import torch
import torch.nn as nn
import torch.nn.functional as F

class MyAtt(nn.Module):
    """Toy attention module for a single query.

    Attention logits are produced by a linear layer over the concatenation
    [Q, K]; the softmax-ed weights are applied to the value bank V with a
    batched matmul, and [Q, attended-V] is projected to ``output_size``.

    NOTE(review): forward indexes ``Q[0]``/``K[0]`` and re-adds the batch
    dim with ``unsqueeze(0)``, so it only supports batch size 1.
    """

    def __init__(self, query_size, key_size, value_size1, value_size2, output_size):
        # query_size:  feature dim of Q (param was misspelled 'query_szie')
        # key_size:    feature dim of K
        # value_size1: number of value rows in V (attention length)
        # value_size2: feature dim of each value row of V
        # output_size: feature dim of the returned, attention-enhanced Q
        super(MyAtt, self).__init__()
        self.query_size = query_size
        self.key_size = key_size
        self.value_size1 = value_size1
        self.value_size2 = value_size2  # fixed typo: was 'value_siez2'
        self.output_size = output_size
        # Maps [Q;K] -> one attention logit per value row of V.
        self.attn = nn.Linear(query_size + key_size, value_size1)
        # Maps [Q; attended-V] -> output_size. Fixed typo: was 'attn_combie'.
        self.attn_combine = nn.Linear(query_size + value_size2, output_size)

    def forward(self, Q, K, V):
        """Return (output, attn_weights).

        Q: [1, 1, query_size], K: [1, 1, key_size],
        V: [1, value_size1, value_size2]
        output: [1, 1, output_size], attn_weights: [1, value_size1]
        """
        # 1. Concatenate query and key: [1,query] + [1,key] -> [1, query+key]
        qk = torch.cat((Q[0], K[0]), dim=-1)

        # 2-3. Linear + softmax -> attention distribution over V's rows.
        attn_weights = F.softmax(self.attn(qk), dim=-1)
        # (removed debug print of attn_weights here)

        # 4. Weighted sum of value rows:
        #    [1,1,value_size1] @ [1,value_size1,value_size2] -> [1,1,value_size2]
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), V)

        # 5. Concatenate query with attended values: [1, query+value_size2]
        output = torch.cat((Q[0], attn_applied[0]), dim=-1)

        # 6. Project to output size and restore the batch dim: [1,1,output_size]
        output = self.attn_combine(output).unsqueeze(0)
        return output, attn_weights


if __name__ == '__main__':
    # Hyper-parameters for the demo.
    query_size = 32
    key_size = 32
    value_size1 = 10
    value_size2 = 32
    output_size = 32

    # 1 prepare random demo data (batch size must be 1):
    #   Q/K are single query/key vectors, V is a bank of value_size1 rows.
    Q = torch.randn(1, 1, query_size)
    K = torch.randn(1, 1, key_size)
    V = torch.randn(1, value_size1, value_size2)

    # 2 instantiate the attention module from the named config values
    #   (they were previously defined but unused — duplicated as literals).
    myattention = MyAtt(query_size, key_size, value_size1, value_size2, output_size)
    print('myattention', myattention)

    # 3 run the data through the module
    output, attn_weights = myattention(Q, K, V)

    # 4 report results
    print('增强的q-->', output.shape, output)
    print('q的权重分布-->', attn_weights.shape, attn_weights)
    print('End')
