import numpy as np
# Fixed RNG seed so the randomly-initialized weights and demo output are reproducible.
np.random.seed(114514)
def scaled_dot_product_attention(Q, K, V, mask=None):
    """Compute scaled dot-product attention: softmax(Q·Kᵀ / √d_k)·V.

    Parameters
    ----------
    Q, K, V : ndarray
        Query/key/value tensors whose last two axes are (seq_len, head_dim);
        leading axes (e.g. batch, heads) are carried through unchanged.
    mask : ndarray, optional
        Broadcastable to the score matrix; positions where ``mask == 0``
        receive a large negative logit and thus ~zero attention weight.

    Returns
    -------
    output : ndarray
        Attention-weighted values, same shape as ``Q``.
    attention_weights : ndarray
        Softmax-normalized score matrix (last axis sums to 1).
    """
    d_k = Q.shape[-1]
    # swapaxes(-1, -2) transposes the last two axes for any rank
    # (the original axes=(0,1,3,2) only worked for 4-D input).
    scores = np.matmul(Q, np.swapaxes(K, -1, -2)) / np.sqrt(d_k)
    if mask is not None:
        scores = np.where(mask == 0, -1e9, scores)
    # Numerically stable softmax: subtract the per-row max before exp so
    # large logits cannot overflow to inf (inf/inf would yield NaN weights).
    scores = scores - np.max(scores, axis=-1, keepdims=True)
    attention_weights = np.exp(scores)
    attention_weights = attention_weights / np.sum(attention_weights, axis=-1, keepdims=True)
    output = np.matmul(attention_weights, V)
    return output, attention_weights
def multi_head_attention(embed_size, num_heads, input, mask=None):
    """Single forward pass of multi-head self-attention with random weights.

    NOTE: the projection matrices Wq/Wk/Wv/Wo are drawn fresh on every call
    (demo code) — repeated calls use different weights unless the global
    NumPy seed is reset beforehand.

    Parameters
    ----------
    embed_size : int
        Per-token embedding dimension; must be divisible by ``num_heads``.
    num_heads : int
        Number of attention heads.
    input : ndarray
        Input tensor of shape (batch_size, seq_len, embed_size), where
        batch_size is the number of samples (sentences) in the batch and
        seq_len is the number of tokens per sample.
    mask : ndarray, optional
        Broadcastable to the per-head (seq_len, seq_len) score matrix;
        positions where ``mask == 0`` are suppressed.

    Returns
    -------
    output : ndarray
        Shape (batch_size, seq_len, embed_size).
    weights : ndarray
        Attention weights of the first batch element,
        shape (num_heads, seq_len, seq_len).

    Raises
    ------
    ValueError
        If ``embed_size`` is not divisible by ``num_heads``.
    """
    if embed_size % num_heads != 0:
        # Without this check, head_dim silently truncates and the reshape
        # below fails with a confusing size-mismatch error.
        raise ValueError("embed_size must be divisible by num_heads")
    batch_size, seq_len, _ = input.shape
    head_dim = embed_size // num_heads
    # Random projection matrices; rows map from the input feature axis.
    Wq = np.random.randn(embed_size, embed_size)
    Wk = np.random.randn(embed_size, embed_size)
    Wv = np.random.randn(embed_size, embed_size)
    Wo = np.random.randn(embed_size, embed_size)
    # Linear projections of the input into query/key/value spaces.
    Q = np.matmul(input, Wq)
    K = np.matmul(input, Wk)
    V = np.matmul(input, Wv)
    # Split into heads, then transpose to (batch, heads, seq_len, head_dim).
    Q = Q.reshape(batch_size, seq_len, num_heads, head_dim).transpose(0, 2, 1, 3)
    K = K.reshape(batch_size, seq_len, num_heads, head_dim).transpose(0, 2, 1, 3)
    V = V.reshape(batch_size, seq_len, num_heads, head_dim).transpose(0, 2, 1, 3)
    # Scaled dot-product attention, applied to all heads at once.
    attn_output, attn_weights = scaled_dot_product_attention(Q, K, V, mask)
    # Transpose back and merge the heads into a single embedding axis.
    attn_output = attn_output.transpose(0, 2, 1, 3).reshape(batch_size, seq_len, embed_size)
    output = np.matmul(attn_output, Wo)
    # Expose only the first batch element's weights (demo convenience).
    weights = attn_weights[0]
    return output, weights
if __name__ == "__main__":
    batch_size = 10
    seq_len = 20
    embed_size = 128
    num_heads = 8
    input = np.random.randn(batch_size,seq_len,embed_size) 
    output,weights = multi_head_attention(embed_size,num_heads,input)
    print(output.shape,weights.shape)
    print(output[0][0][:10],weights[0][0][:10])
