# Hyper-parameter settings for the attention module below.
d_model = 512 # embedding / model dimension
n_heads = 8   # number of attention heads
d_k = d_v = d_model //n_heads   # per-head dimension of K (=Q) and V; 512 / 8 = 64
class MultiHeadAttention(nn.Module):
    """Multi-head attention layer.

    Projects the inputs into per-head Q, K, V, runs scaled dot-product
    attention for every head in parallel, then concatenates the heads
    and projects back to d_model.
    """

    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        # Three linear maps produce the per-head Q, K, V projections
        # (all heads computed in one matmul, split later via view()).
        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=False)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=False)
        # Final projection: concatenated heads -> model dimension.
        self.fc = nn.Linear(n_heads * d_v, d_model, bias=False)

    def forward(self, input_Q, input_K, input_V, attn_mask):
        '''
        input_Q: [batch_size, len_q, d_model]
        input_K: [batch_size, len_k, d_model]
        input_V: [batch_size, len_v(=len_k), d_model]
        attn_mask: [batch_size, seq_len, seq_len]
        returns: [batch_size, len_q, d_model]
        '''
        # BUG FIX: derive batch_size from the input instead of relying on
        # an undefined (or module-level global) name — the original body
        # used `batch_size` without ever defining it. This also makes the
        # layer work with a variable batch size at inference time.
        batch_size = input_Q.size(0)

        # [B, len, d_model] -(W)-> [B, len, n_heads * d_k]
        # -(view)-> [B, len, n_heads, d_k] -(transpose)-> [B, n_heads, len, d_k]
        Q = self.W_Q(input_Q).view(batch_size, -1, n_heads, d_k).transpose(1, 2)
        K = self.W_K(input_K).view(batch_size, -1, n_heads, d_k).transpose(1, 2)
        V = self.W_V(input_V).view(batch_size, -1, n_heads, d_v).transpose(1, 2)

        # Replicate the mask for every head:
        # [B, seq_len, seq_len] -> [B, n_heads, seq_len, seq_len]
        attn_mask = attn_mask.unsqueeze(1).repeat(1, n_heads, 1, 1)

        # context: [B, n_heads, len_q, d_v]
        # NOTE(review): assumes ScaledDotProductAttention returns only the
        # context tensor (many reference implementations return a
        # (context, attn) pair) — confirm against its definition.
        context = ScaledDotProductAttention()(Q, K, V, attn_mask)

        # Merge heads back: [B, len_q, n_heads * d_v]. reshape() (not view())
        # is required because transpose() makes the tensor non-contiguous.
        context = context.transpose(1, 2).reshape(batch_size, -1, n_heads * d_v)

        # Project to [B, len_q, d_model].
        output = self.fc(context)
        return output
