import torch
from torch import nn
import math
# Multi-head attention: each head attends to a different part of the embedding.
class MultiHeadAttention(nn.Module):
    def __init__(self, dim_model, head_num, dropout_p):
        super(MultiHeadAttention,self).__init__()
        self.head_num = head_num
        self.dim_model = dim_model
        assert self.dim_model % self.head_num == 0, "dim_model must be divisible by head_num"
        self.dim_per_head = self.dim_model // self.head_num
        self.w_q = nn.Linear(dim_model, dim_model)
        self.w_k = nn.Linear(dim_model, dim_model)
        self.w_v = nn.Linear(dim_model, dim_model)
        self.w_o = nn.Linear(dim_model, dim_model)
        self.dropout = nn.Dropout(dropout_p)

    @staticmethod
    def attention(q, k, v, mask, dropout):
        dim_k = q.shape[-1]
        attention_scores = q @ k.transpose(-2, -1) / math.sqrt(dim_k)
        if mask is not None:
            attention_scores = attention_scores.masked_fill(mask==0,-10000)
        attention_scores = attention_scores.softmax(dim=-1)
        if dropout is not None:
            attention_scores = dropout(attention_scores)
        return attention_scores @ v, attention_scores

    def forward(self, q, k, v, mask=None):
        batch_size, seq_len, dimension = q.shape
        q, k, v = self.w_q(q), self.w_k(k), self.w_v(v)
        q = q.view(batch_size, seq_len, self.head_num, self.dim_per_head).permute(0, 2, 1, 3)
        k = k.view(batch_size, seq_len, self.head_num, self.dim_per_head).permute(0, 2, 1, 3)
        v = v.view(batch_size, seq_len, self.head_num, self.dim_per_head).permute(0, 2, 1, 3)
        x, self.attention_scores = MultiHeadAttention.attention(q, k, v, mask, self.dropout)
        x = x.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_len, dimension)
        return self.w_o(x)


        