import torch
import torch.nn as nn
import torch.nn.functional as F
# Fix the global RNG seed so the demo in __main__ below produces
# reproducible tensors (and reproducible randomly-initialized weights).
torch.manual_seed(114514)
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention (Vaswani et al., 2017).

    Projects queries, keys and values into ``num_heads`` subspaces of size
    ``embed_size // num_heads``, attends within each head independently,
    then recombines the heads with a final linear projection.
    """

    def __init__(self, embed_size, num_heads, dropout=0.1):
        super().__init__()
        # Each head takes an equal slice of the embedding dimension.
        assert embed_size % num_heads == 0, "embed_size must be divisible by num_heads"
        self.embed_size = embed_size
        self.num_heads = num_heads
        self.head_dim = embed_size // num_heads
        self.query_linear = nn.Linear(embed_size, embed_size)
        self.key_linear = nn.Linear(embed_size, embed_size)
        self.value_linear = nn.Linear(embed_size, embed_size)
        self.output_linear = nn.Linear(embed_size, embed_size)
        self.dropout = nn.Dropout(dropout)

    def scaled_dot_product_attention(self, Q, K, V, mask=None):
        """Compute ``softmax(Q K^T / sqrt(d_k)) V``.

        Args:
            Q: queries of shape (..., seq_q, d_k).
            K: keys of shape (..., seq_k, d_k).
            V: values of shape (..., seq_k, d_v).
            mask: optional tensor broadcastable to the score shape;
                positions where ``mask == 0`` are excluded from attention.

        Returns:
            Tuple of (attended output, post-dropout attention weights).
        """
        d_k = Q.size(-1)
        # Scale by sqrt(d_k) so the logits stay in a range where the
        # softmax gradient is well-behaved (standard transformer scaling).
        scores = torch.matmul(Q, K.transpose(-2, -1)) / torch.sqrt(
            torch.tensor(d_k, dtype=torch.float32)
        )
        if mask is not None:
            # A large negative logit becomes ~0 probability after softmax.
            scores = scores.masked_fill(mask == 0, -1e9)
        attention_weights = torch.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)
        output = torch.matmul(attention_weights, V)
        return output, attention_weights

    def forward(self, query, key, value, mask=None):
        """Run multi-head attention.

        Args:
            query: (batch, seq_q, embed_size) tensor.
            key:   (batch, seq_k, embed_size) tensor.
            value: (batch, seq_k, embed_size) tensor.
            mask: optional mask passed through to the attention scores.

        Returns:
            Tuple of:
              - output: (batch, seq_q, embed_size) attended representation.
              - weights: (num_heads, seq_q, seq_k) attention weights of the
                FIRST batch element only (original contract, preserved).
        """
        batch_size, q_len, _ = query.size()
        # Fix: use key/value's own sequence length so cross-attention with
        # key/value sequences of a different length than the query works.
        # Previously both reshapes assumed the query length.
        kv_len = key.size(1)
        Q = self.query_linear(query)
        K = self.key_linear(key)
        V = self.value_linear(value)
        # (batch, seq, embed) -> (batch, heads, seq, head_dim)
        Q = Q.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        K = K.view(batch_size, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
        V = V.view(batch_size, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
        attn_output, attn_weights = self.scaled_dot_product_attention(Q, K, V, mask)
        # Merge heads: (batch, heads, seq_q, head_dim) -> (batch, seq_q, embed)
        attn_output = (
            attn_output.transpose(1, 2).contiguous().view(batch_size, q_len, self.embed_size)
        )
        output = self.output_linear(attn_output)
        # NOTE(review): only batch element 0's per-head weights are returned,
        # matching the original return shape; callers needing the full
        # (batch, heads, seq_q, seq_k) tensor should drop this indexing.
        weights = attn_weights[0]
        return output, weights
if __name__ == "__main__":
    batch_size = 10
    seq_len = 20
    embed_size = 128
    num_heads = 8
    input_tensor = torch.randn(batch_size,seq_len,embed_size)
    model = MultiHeadAttention(embed_size,num_heads)
    output,attn_weights = model(input_tensor,input_tensor,input_tensor)
    print(output.shape,attn_weights.shape)
    print(output[0][0][:10],attn_weights[0][0][:10])
