import torch
import math

# Demo: suppress padded positions in a (batch, query, key) score tensor.
scores = torch.randn(2, 3, 3)  # stand-in for raw attention scores (e.g. q @ k^T)
inputs = torch.tensor([
    [1, 2, 3],
    [4, 5, 0],
])
print(scores)
# True where the token id is the pad id (0); unsqueeze(1) adds a query
# axis so the (2, 1, 3) mask broadcasts across every query row.
mask = inputs.eq(0).unsqueeze(1)
print(mask)
print(mask.shape)
# Fill masked slots with a large negative number so softmax sends them to ~0.
scores = scores.masked_fill(mask, -1e9)
print(scores)
res = torch.softmax(scores, dim=-1)
print(res)

def get_padding_mask(x, padding_idx):
    """Return a padding mask: True wherever ``x`` equals ``padding_idx``.

    A singleton dim is inserted at axis 1 so the mask broadcasts over the
    query dimension of a (batch, query, key) attention-score tensor.
    """
    return x.eq(padding_idx).unsqueeze(1)


def attention(q, k, v, mask=None, dropout=None):
    """Scaled dot-product attention: softmax(q @ k^T / sqrt(d_k)) @ v.

    mask: optional bool tensor, True marks key positions to suppress;
          masked scores are set to -1e9 so they get ~zero weight.
    dropout: optional callable applied to the attention weights.
    Returns the attention-weighted sum over ``v``.
    """
    # Scale by sqrt of the key dimension to keep score variance stable.
    scale = math.sqrt(k.size(-1))
    scores = q.matmul(k.transpose(-2, -1)) / scale
    if mask is not None:
        scores = scores.masked_fill(mask, -1e9)
    weights = scores.softmax(dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights.matmul(v)

