# Demo: run MultiHeadAttention as encoder self-attention on random inputs.
mha = MultiHeadAttention()

# enc_inputs: [batch_size, src_len, d_model] = (2, 5, 512)
enc_inputs = torch.rand(2, 5, 512)

# Token-id sequences used only to build the attention mask.
# NOTE(review): presumably id 0 marks padding — confirm against get_attn_pad_mask.
# seq_q: [batch_size, len_q] = (2, 5)
seq_q = torch.tensor([[6, 1, 2, 3, 5],
                      [6, 1, 2, 3, 4]])
# seq_k: [batch_size, len_k] = (2, 5)
seq_k = torch.tensor([[1, 2, 3, 5, 0],
                      [1, 2, 3, 4, 0]])

# self_attn_mask: [batch_size, len_q, len_k] = (2, 5, 5)
self_attn_mask = get_attn_pad_mask(seq_q, seq_k)

# Same tensor supplied as Q, K and V -> self-attention.
# outputs: [batch_size, len_q, d_model] = (2, 5, 512)
outputs = mha(enc_inputs, enc_inputs, enc_inputs, self_attn_mask)
