import mindspore
from mindspore import nn, ops, Tensor
from mindspore import dtype as mstype


class ScaledDotProductAttention(nn.Cell):
    """Scaled dot-product attention: softmax(Q @ K^T / sqrt(d_k)) @ V.

    When query, key and value all come from the same input, this is
    self-attention.
    """

    def __init__(self, dropout_p=0.):
        """
        Args:
            dropout_p: probability of zeroing an attention weight
                during training (0. disables dropout).
        """
        super().__init__()
        self.softmax = nn.Softmax()  # normalizes over the last (key) axis
        self.dropout = nn.Dropout(p=dropout_p)
        self.sqrt = ops.Sqrt()

    def construct(self, query, key, value, attn_mask=None):
        """Apply scaled dot-product attention.

        Args:
            query: [batch_size, n_heads, seq_len_Q, d_k]
            key: [batch_size, n_heads, seq_len_K, d_k]
            value: [batch_size, n_heads, seq_len_K, d_v]
            attn_mask: optional boolean mask [batch_size, seq_len_Q, seq_len_K];
                positions where it is True are excluded from attention.

        Returns:
            output: attention context vectors
                [batch_size, n_heads, seq_len_Q, d_v]
            attn: attention weights
                [batch_size, n_heads, seq_len_Q, seq_len_K]
        """
        embed_size = query.shape[-1]
        scaling_factor = self.sqrt(Tensor(embed_size, mstype.float32))  # sqrt(d_k)

        # Dot-product scores between every query and every key, scaled by
        # sqrt(d_k) to keep the softmax in a well-conditioned range.
        attn = ops.matmul(query, key.swapaxes(-2, -1)) / scaling_factor

        if attn_mask is not None:
            # Masked (True) positions get a large negative score so their
            # softmax weight becomes effectively zero.
            attn = attn.masked_fill(attn_mask, -1e9)

        # Turn scores into weights; dropout regularizes the weights.
        attn = self.softmax(attn)
        attn = self.dropout(attn)

        # Weighted average of the value vectors.
        output = ops.matmul(attn, value)

        return output, attn


if __name__ == '__main__':
    # Smoke test: wrap ScaledDotProductAttention in a single-head
    # self-attention cell and run it on dummy data.
    class SelfAttention(nn.Cell):
        """Single-head self-attention: Q, K, V are projections of one input."""

        def __init__(self, d_model, dropout_p=0.):
            """
            Args:
                d_model: feature dimension of the input and the projections.
                dropout_p: dropout probability for the attention weights.
            """
            super().__init__()
            # Linear projections producing Q, K and V from the same input.
            self.W_q = nn.Dense(d_model, d_model, has_bias=False)
            self.W_k = nn.Dense(d_model, d_model, has_bias=False)
            self.W_v = nn.Dense(d_model, d_model, has_bias=False)
            self.attn = ScaledDotProductAttention(dropout_p)

        def construct(self, x, attn_mask=None):
            """Run self-attention over x.

            Args:
                x: [batch_size, n, d_model]
                attn_mask: optional boolean mask [batch_size, n, n];
                    True positions are excluded from attention.

            Returns:
                output: [batch_size, n, d_model]
                attn: [batch_size, n, n]
            """
            Q = self.W_q(x)
            K = self.W_k(x)
            V = self.W_v(x)
            output, attn = self.attn(Q, K, V, attn_mask)

            return output, attn

    d_model = 64
    self_attn = SelfAttention(d_model)

    batch_size = 1
    n = 32
    x = ops.ones((batch_size, n, d_model), mindspore.float32)
    # All-False mask (x > x is never true), i.e. nothing is masked out.
    attn_mask = ops.ones((batch_size, n, n), mindspore.float32)
    attn_mask = mindspore.ops.gt(attn_mask, attn_mask)
    output, attn = self_attn(x, attn_mask)
    print(output.shape, attn.shape)