from transformers_network.scaled_dot_product_attention import ScaledDotProductAttention, nn, ops, mstype, Tensor


class MultiHeadAttention(nn.Cell):
    """Multi-head attention: project Q/K/V, split into heads, attend per head,
    then merge the heads and apply the output projection (Vaswani et al.)."""

    def __init__(self, d_key, n_heads, dropout_p=0.):
        """
        Args:
            d_key (int): model (input/output) feature dimension.
            n_heads (int): number of attention heads; must divide d_key.
            dropout_p (float): dropout probability forwarded to the attention core.

        Raises:
            ValueError: if d_key is not divisible by n_heads.
        """
        super().__init__()
        self.n_heads = n_heads
        self.d_key = d_key
        self.head_dim = d_key // n_heads
        # Equivalent to checking head_dim * n_heads == d_key.
        if d_key % n_heads != 0:
            raise ValueError("The init argument 'd_key' must be divisible by 'n_heads'.")

        # Each projection keeps width head_dim * n_heads (== d_key), so the
        # result can be reshaped into per-head slices without copying data.
        proj_dim = self.head_dim * self.n_heads
        self.W_Q = nn.Dense(self.d_key, proj_dim)
        self.W_K = nn.Dense(self.d_key, proj_dim)
        self.W_V = nn.Dense(self.d_key, proj_dim)

        self.W_O = nn.Dense(d_key, d_key)
        self.attention = ScaledDotProductAttention(dropout_p=dropout_p)

    def construct(self, query, key, value, attn_mask):
        """
        Args:
            query: [batch_size, len_tgt, d_key]
            key:   [batch_size, len_src, d_key]
            value: [batch_size, len_src, d_key]
            attn_mask: [batch_size, seq_len, seq_len]

        Returns:
            (output, attn): output is [batch_size, len_tgt, d_key]; attn is the
            per-head attention weights returned by the attention core.
        """
        batch_size = query.shape[0]

        # Project, expose the head axis, then bring heads forward:
        # [B, L, H*Dh] -> [B, L, H, Dh] -> [B, H, L, Dh].
        q = self.W_Q(query).view(batch_size, -1, self.n_heads, self.head_dim)
        k = self.W_K(key).view(batch_size, -1, self.n_heads, self.head_dim)
        v = self.W_V(value).view(batch_size, -1, self.n_heads, self.head_dim)
        q = q.transpose((0, 2, 1, 3))
        k = k.transpose((0, 2, 1, 3))
        v = v.transpose((0, 2, 1, 3))

        # Replicate the mask per head: [B, S, S] -> [B, 1, S, S] -> [B, H, S, S].
        mask = ops.tile(attn_mask.expand_dims(1), (1, self.n_heads, 1, 1))

        context, attn = self.attention(q, k, v, mask)

        # Merge heads back (the paper's "concat"): [B, H, L, Dh] -> [B, L, H*Dh].
        context = context.transpose((0, 2, 1, 3)).view((batch_size, -1, self.n_heads * self.head_dim))

        return self.W_O(context), attn


if __name__ == '__main__':
    # Shape-check demo: run the hand-rolled layer, then MindSpore's built-in one.
    batch_size, seq_len = 1, 40
    d_key, n_heads = 32, 8
    dropout_p = 0.1

    query = ops.ones((batch_size, seq_len, d_key), mstype.float32)
    key = ops.ones((batch_size, seq_len, d_key), mstype.float32)
    value = ops.ones((batch_size, seq_len, d_key), mstype.float32)
    attn_mask = ops.ones((batch_size, seq_len, seq_len), mstype.bool_)

    multi_head_attn = MultiHeadAttention(d_key, n_heads, dropout_p)
    output, attn = multi_head_attn(query, key, value, attn_mask)
    print(output.shape, attn.shape)

    # Built-in layer for comparison. The 2-D bool mask below is passed as the
    # 4th positional argument (presumably key_padding_mask — verify against the
    # MindSpore API). NOTE(review): an all-True padding mask would mask out
    # every key position — confirm this is intended for the demo.
    mha = nn.MultiheadAttention(d_key, n_heads, dropout_p, batch_first=True)
    attn_mask = ops.ones((batch_size, seq_len), mstype.bool_)
    output, attn = mha(query, key, value, attn_mask)
    print(output.shape, attn.shape)