import torch
import torch.nn as nn

"""
多头注意力机制
"""


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Splits ``d_model`` into ``num_heads`` independent subspaces, runs scaled
    dot-product attention in each head, then concatenates the heads and
    applies a final linear projection ("Attention Is All You Need",
    Vaswani et al., 2017).

    Args:
        d_model: Total model/embedding dimension.
        num_heads: Number of parallel attention heads; must divide ``d_model``.

    Raises:
        ValueError: If ``d_model`` is not divisible by ``num_heads``.
    """

    def __init__(self, d_model=512, num_heads=8):
        super().__init__()
        # Explicit exception instead of `assert`: asserts are stripped under -O,
        # which would silently allow a broken head split.
        if d_model % num_heads != 0:
            raise ValueError(
                f"d_model ({d_model}) must be divisible by num_heads ({num_heads})"
            )
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads
        # 1/sqrt(head_dim) scaling keeps dot-product magnitudes stable;
        # precomputed once instead of per forward pass.
        self.scale = self.head_dim ** -0.5

        # Per-head projections fused into single d_model -> d_model layers.
        self.query_linear = nn.Linear(d_model, d_model)
        self.key_linear = nn.Linear(d_model, d_model)
        self.value_linear = nn.Linear(d_model, d_model)

        self.output_linear = nn.Linear(d_model, d_model)

    def forward(self, x, attn_mask=None):
        """Apply multi-head self-attention to ``x``.

        Args:
            x: Input tensor of shape ``(batch, seq_len, d_model)``.
            attn_mask: Optional mask broadcastable to
                ``(batch, num_heads, seq_len, seq_len)``. Positions where the
                mask is 0/``False`` are excluded from attention. ``None``
                (the default) attends everywhere — the original behavior.

        Returns:
            Tensor of shape ``(batch, seq_len, d_model)``.
        """
        B, L, D = x.shape

        # Project, then split (B, L, D) -> (B, num_heads, L, head_dim).
        query = self.query_linear(x).reshape(B, L, self.num_heads, self.head_dim).transpose(1, 2)
        key = self.key_linear(x).reshape(B, L, self.num_heads, self.head_dim).transpose(1, 2)
        value = self.value_linear(x).reshape(B, L, self.num_heads, self.head_dim).transpose(1, 2)

        # Attention scores per head: (B, num_heads, L, L).
        scores = torch.matmul(query, key.transpose(-2, -1)) * self.scale
        if attn_mask is not None:
            # Blocked positions get -inf so softmax assigns them zero weight.
            scores = scores.masked_fill(attn_mask == 0, float("-inf"))
        attn_weights = scores.softmax(dim=-1)

        # Weighted sum of values, then merge heads back to (B, L, D).
        attn_output = torch.matmul(attn_weights, value)
        attn_output = attn_output.transpose(1, 2).reshape(B, L, D)
        return self.output_linear(attn_output)


if __name__ == '__main__':
    from embedding_layer import EmbeddingLayer

    # Smoke test: embed a batch of 4 sequences of 8 token ids (vocab size 10),
    # run them through the attention module, and dump the result.
    token_ids = torch.randint(0, 10, (4, 8))
    embedded = EmbeddingLayer(10, 512)(token_ids)
    attention = MultiHeadAttention()
    result = attention(embedded)
    print(result)
