import torch
import torch.nn as nn

"""
掩码多头注意力机制
"""


class MaskedMultiHeadAttention(nn.Module):
    """Multi-head self-attention with optional causal and key-padding masks.

    Args:
        d_model: total model/embedding dimension; must be divisible by num_heads.
        num_heads: number of parallel attention heads.
    """

    def __init__(self, d_model=512, num_heads=8):
        super(MaskedMultiHeadAttention, self).__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads

        # Separate d_model -> d_model projections for query, key and value.
        self.query_linear = nn.Linear(d_model, d_model)
        self.key_linear = nn.Linear(d_model, d_model)
        self.value_linear = nn.Linear(d_model, d_model)

        # Final projection applied after the heads are re-concatenated.
        self.output_linear = nn.Linear(d_model, d_model)

    def _split_heads(self, x):
        """Reshape (B, L, d_model) -> (B, num_heads, L, head_dim)."""
        B, L, _ = x.shape
        return x.reshape(B, L, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self, x, causal=True, padding_mask_key=None):
        """Apply masked multi-head self-attention.

        Args:
            x: input of shape (B, L, d_model).
            causal: if True, each position attends only to itself and earlier
                positions (lower-triangular mask).
            padding_mask_key: optional (B, L) mask over *keys*; truthy entries
                mark valid tokens, falsy entries are masked out of attention.

        Returns:
            Tensor of shape (B, L, d_model).
        """
        B, L, D = x.shape
        query = self._split_heads(self.query_linear(x))
        key = self._split_heads(self.key_linear(x))
        value = self._split_heads(self.value_linear(x))

        # Scaled dot-product scores, shape (B, num_heads, L, L).
        attn_weights = torch.matmul(query, key.transpose(-2, -1)) / (self.head_dim ** 0.5)

        if padding_mask_key is not None:
            # Broadcast (B, L) -> (B, 1, 1, L) so it masks the key dimension
            # for every head and every query position.
            padding_mask = padding_mask_key.unsqueeze(1).unsqueeze(1).bool()
            attn_weights = attn_weights.masked_fill(~padding_mask, float('-inf'))

        if causal:
            causal_mask = torch.tril(torch.ones(L, L, device=x.device, dtype=torch.bool))
            attn_weights = attn_weights.masked_fill(~causal_mask, float('-inf'))

        attn_weights = attn_weights.softmax(dim=-1)
        # A query row whose keys are ALL masked is all -inf before softmax and
        # becomes all-NaN after it (e.g. a fully padded sequence). Zero those
        # rows so NaN does not silently propagate; valid rows are unaffected.
        attn_weights = torch.nan_to_num(attn_weights, nan=0.0)

        attn_output = torch.matmul(attn_weights, value)
        # (B, H, L, head_dim) -> (B, L, d_model): re-concatenate the heads.
        attn_output = attn_output.transpose(1, 2).reshape(B, L, D)
        attn_output = self.output_linear(attn_output)
        return attn_output


if __name__ == '__main__':
    from embedding_add_positional_encoding import EmbeddingAddPositionalEncoding

    # Batch of 5 sequences of length 8, token ids in [0, 10).
    test_input = torch.randint(0, 10, (5, 8))
    # Key-padding mask: True marks a real token, False marks padding.
    # NOTE: torch.tensor(..., dtype=torch.bool) builds a true boolean mask;
    # the legacy torch.Tensor(...) constructor would silently produce float32.
    key_padding_mask = torch.tensor([[True, True, True, True, True, True, True, True],
                                     [True, True, True, True, True, True, True, False],
                                     [True, True, True, True, True, True, False, False],
                                     [True, True, True, True, False, False, False, False],
                                     [True, True, True, True, True, False, False, False]],
                                    dtype=torch.bool)

    embedding = EmbeddingAddPositionalEncoding(10, 512, seq_len=8, d_model=512)
    test_input = embedding(test_input)
    model = MaskedMultiHeadAttention()
    output = model(test_input, causal=False, padding_mask_key=key_padding_mask)
    print(output)