# Model hyperparameters
d_model = 512                   # embedding dimension
n_heads = 8                     # number of attention heads
d_k = d_v = d_model // n_heads  # per-head dim of K (=Q) and of V
# K and Q must share a dimension (we compute Q @ K^T); V's may differ,
# though here all three are d_model // n_heads = 64.

class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) @ V.

    Note: this variant takes no attention mask; the softmax runs over
    the full key dimension.
    """
    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, Q, K, V):
        '''
        Q: [batch_size, n_heads, len_q, d_k]
        K: [batch_size, n_heads, len_k, d_k]
        V: [batch_size, n_heads, len_v(=len_k), d_v]

        Returns:
            context:    [batch_size, n_heads, len_q, d_v]
            attn_score: [batch_size, n_heads, len_q, len_k]
        '''
        # Raw logits: every query dotted with every key.
        # K.transpose(-1, -2) swaps the last two dims, so
        # matmul_qk: [batch_size, n_heads, len_q, len_k]
        matmul_qk = torch.matmul(Q, K.transpose(-1, -2))

        # Scale by sqrt(d_k) so the logits stay in a range where the
        # softmax gradients are well-behaved.
        # BUG FIX: original called np.sqrt without importing numpy
        # (NameError at runtime); the operand is already a torch float
        # tensor, so torch.sqrt is the correct call.
        d_k = torch.tensor(K.shape[-1], dtype=torch.float)
        scores = matmul_qk / torch.sqrt(d_k)

        # Normalize each row of logits over the key dimension.
        # attn_score: [batch_size, n_heads, len_q, len_k]
        attn_score = nn.Softmax(dim=-1)(scores)

        # Attention-weighted sum of the values.
        # context: [batch_size, n_heads, len_q, d_v]
        context = torch.matmul(attn_score, V)

        # BUG FIX: callers unpack two values (context AND the attention
        # weights — see the smoke test below); the original returned
        # only context.
        return context, attn_score

# Smoke test: feed random inputs and check the output shapes.
# All tensors: (batch_size, num_heads, seq_len, depth)
key = torch.rand((10, 8, 4, 64))
value = torch.rand((10, 8, 4, 64))
query = torch.rand((10, 8, 4, 64))

ctx, weights = ScaledDotProductAttention()(query, key, value)
print(ctx.shape)     # torch.Size([10, 8, 4, 64])
print(weights.shape) # torch.Size([10, 8, 4, 4])
