# Multi-head self-attention implementation
import numpy as np
import math
def sofmax(scores):
    """Numerically stable softmax over the last axis of *scores*.

    The row-wise maximum is subtracted before exponentiating so large
    scores cannot overflow; the result is unchanged because softmax is
    invariant to a constant shift.
    """
    shifted = scores - scores.max(axis=-1, keepdims=True)
    weights = np.exp(shifted)
    return weights / weights.sum(axis=-1, keepdims=True)

def str_matrix(str):
    """Parse a comma-separated string of integers into a 1-D int ndarray.

    NOTE: the parameter name shadows the builtin ``str``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # np.fromstring is deprecated for text input (DeprecationWarning,
    # scheduled for removal); np.fromiter parses the tokens reliably.
    return np.fromiter((int(tok) for tok in str.split(",")), dtype=int)

def main():
    """Read attention inputs from stdin and print intermediate results.

    Expected stdin format (';'-separated):
        num_heads;X[;Q;K;V]
    where each matrix field is a flat comma-separated list of integers.

    NOTE(review): the original left the Q/K/V parsing commented out and
    then crashed with a NameError on Q; when those fields are absent we
    fall back to Q = K = V = X so the script runs end to end.
    """
    data = input().split(";")
    num_heads = int(data[0])  # fix: was left as str, breaking d_k arithmetic
    X = str_matrix(data[1])
    print(X)

    if len(data) >= 5:
        Q = np.atleast_2d(str_matrix(data[2])).astype(float)
        K = np.atleast_2d(str_matrix(data[3])).astype(float)
        V = np.atleast_2d(str_matrix(data[4])).astype(float)
    else:
        Q = K = V = np.atleast_2d(X).astype(float)

    # Per-head dimensionality: the model dimension must split evenly
    # across the heads.
    d_model = Q.shape[-1]
    if d_model % num_heads:
        raise ValueError(
            f"d_model={d_model} is not divisible by num_heads={num_heads}"
        )
    d_k = d_model // num_heads  # fix: '/' produced a float d_k

    # Scaled dot-product scores. swapaxes (not .T) transposes only the
    # last two axes, so this also works for batched 3-D Q/K.
    attention_scores = np.matmul(Q, np.swapaxes(K, -1, -2)) / math.sqrt(d_k)
    print(attention_scores)

    # Attention weights and output (previously dead commented-out code).
    softmax_scores = sofmax(attention_scores)
    attention = np.matmul(softmax_scores, V)
    print(attention)


if __name__ == '__main__':
    main()

