
import torch
import torch.nn as nn
import torch.functional as F

def self_attention(querys, keys, values):
    """Scaled dot-product attention.

    Args:
        querys: query tensor of shape (..., seq_q, d_k).
        keys:   key tensor of shape   (..., seq_k, d_k).
        values: value tensor of shape (..., seq_k, d_v).

    Returns:
        Tuple (output, attention): output has shape (..., seq_q, d_v);
        attention is the (..., seq_q, seq_k) softmax weight matrix.
    """
    # Size of the last (feature) dimension.
    d_k = querys.size(-1)
    # Fixes vs. the original:
    #  - scores must be querys @ keys^T (the original used keys @ keys.T,
    #    ignoring the queries entirely);
    #  - scale by sqrt(d_k), not d_k (scaled dot-product attention);
    #  - transpose only the last two dims so batched (>2-D) inputs work.
    scores = torch.matmul(querys, keys.transpose(-2, -1)) / (d_k ** 0.5)
    # Normalize over the key axis. torch.softmax with an explicit dim avoids
    # both the deprecated implicit-dim softmax and the broken
    # `torch.functional` alias imported at the top of the file.
    attention = torch.softmax(scores, dim=-1)
    return torch.matmul(attention, values), attention


class MultiHeadAttention(nn.Module):
    """Multi-head attention module.

    Currently a placeholder: no sub-layers are registered and ``forward``
    performs no computation yet.
    """

    def __init__(self):
        # Zero-argument super() — equivalent to
        # super(MultiHeadAttention, self).__init__() in Python 3.
        super().__init__()

    def forward(self):
        """Not implemented yet; returns None like the original stub."""
        return None


class Transformer(nn.Module):
    """Transformer model skeleton.

    Placeholder class: no encoder/decoder stacks are built yet and
    ``forward`` performs no computation.
    """

    def __init__(self):
        # Zero-argument super() — same effect as
        # super(Transformer, self).__init__() in Python 3.
        super().__init__()

    def forward(self):
        """Not implemented yet; returns None like the original stub."""
        return None