import torch
import torch.nn as nn
 
class attention(nn.Module):
    """Toy single-query attention module.

    Expected tensor shapes (batch size is effectively fixed at 1 by the
    ``Q[0]`` / ``K[0]`` indexing in ``forward``):

        Q: (1, 1, query_size)
        K: (1, 1, key_size)
        V: (1, value_size1, value_size2)

    Returns from ``forward``:
        output:           (1, 1, output_size)
        attention_weight: (1, value_size1)
    """

    def __init__(self, query_size, key_size, value_size1, value_size2, output_size):
        super().__init__()
        self.query_size = query_size    # last dim of Q
        self.key_size = key_size        # last dim of K
        self.value_size1 = value_size1  # second dim of V (number of value slots)
        self.value_size2 = value_size2  # last dim of V (value feature size)
        self.output_size = output_size  # last dim of the returned output
        # Maps concat(Q, K) -> one (pre-softmax) score per value slot.
        self.line1 = nn.Linear(self.query_size + key_size, self.value_size1)
        # Maps concat(Q, attended V) -> final output features.
        self.line2 = nn.Linear(self.query_size + value_size2, self.output_size)

    def forward(self, Q, K, V):
        # (1, query_size + key_size) -> (1, value_size1), softmax over slots.
        attention_weight = torch.softmax(
            self.line1(torch.cat((Q[0], K[0]), dim=1)), dim=1
        )

        # Weighted sum of value slots:
        # (1, 1, value_size1) @ (1, value_size1, value_size2) -> (1, 1, value_size2)
        attention_applied = torch.bmm(attention_weight.unsqueeze(0), V)

        # Concatenate the query with the attended values:
        # (1, query_size) ++ (1, value_size2) -> (1, query_size + value_size2)
        combined = torch.cat((Q[0], attention_applied[0]), dim=1)

        # Project to the output size and restore the leading batch dim:
        # (1, output_size) -> (1, 1, output_size)
        output = self.line2(combined).unsqueeze(0)

        return output, attention_weight
 
 
def main():
    """Build a demo attention module, run one forward pass, print the result."""
    attn = attention(
        query_size=32,
        key_size=32,
        value_size1=32,
        value_size2=64,
        output_size=64,
    )
    # Q/K: (batch, seq_len, feature); V: (batch, slots, feature).
    Q = torch.randn(1, 1, 32)
    K = torch.randn(1, 1, 32)
    V = torch.randn(1, 32, 64)
    out, weights = attn(Q, K, V)
    print(out)
    print(weights)
 
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
 
# tensor([[[ 0.0659, -0.2032, -0.1200,  0.3103,  0.1267, -0.2573,  0.0595,
#           -0.3809,  0.6900,  0.5539, -0.1737, -0.3153,  0.7032, -0.6192,
#           -0.0120,  0.4132,  0.1484,  0.3911, -0.0876,  0.3548, -0.2247,
#           -0.3236,  0.2761, -0.1817, -0.2472, -0.3050,  0.4670,  0.3442,
#           -0.0092,  0.5283,  0.0881,  0.2219, -0.7051, -0.0028,  0.5049,
#            0.7083, -0.2809,  0.3218, -0.3225,  0.1372,  0.3596,  0.0069,
#           -0.1422, -0.0494,  0.4049, -0.0856,  0.5200, -0.0793, -0.0608,
#           -0.0135,  0.3282, -0.6138,  0.0643, -0.6000, -0.1060,  0.4633,
#            0.1958, -0.2890,  0.3448, -0.2266,  0.1201,  0.3016,  0.7245,
#            0.2607]]], grad_fn=<UnsqueezeBackward0>)
# tensor([[0.0564, 0.0397, 0.0270, 0.0137, 0.0302, 0.0242, 0.0452, 0.0154, 0.0244,
#          0.0204, 0.0363, 0.0151, 0.0079, 0.0592, 0.0556, 0.0196, 0.0188, 0.1013,
#          0.0348, 0.0679, 0.0289, 0.0183, 0.0196, 0.0123, 0.0113, 0.0516, 0.0051,
#          0.0378, 0.0405, 0.0109, 0.0426, 0.0080]], grad_fn=<SoftmaxBackward0>)