import torch
import torch.nn as nn


# class SingleHeadAttention(nn.Module):
#     def __init__(self, n_dims, n_keys) -> None:
#         super().__init__()
#         # n_keys 查询/键/值的数量
#         # n_dims 词向量维度
#         self.n_keys = n_keys
#         self.n_dims = n_dims
#         self.W_k = nn.Linear(n_dims, n_keys)
#         self.W_q = nn.Linear(n_dims, n_keys)
#         self.W_v = nn.Linear(n_dims, n_keys)

#     def forward(self, X):
#         # X'shape is (batch_size, n_vocabs, n_dims)
#         key = self.W_k(X)
#         quary = self.W_q(X)
#         value = self.W_v(X)
#         # the shape above is (batch_size, n_vocabs, n_keys)
#         score = quary @ torch.transpose(key, -1, -2) / self.n_keys**0.5
#         score = torch.softmax(score, dim=-1)
#         out = score @ value
#         return out


# class MultiheadAttention(nn.Module):
#     def __init__(self, n_dims, n_heads, n_keys) -> None:
#         super().__init__()
#         # n_heads注意力头的数量
#         # n_keys 查询/键/值的数量
#         self.n_keys = n_keys
#         self.n_heads = n_heads

#         assert n_keys % n_heads == 0 
#         # 查询/键/值的数量是平均分配给多个头的
#         self.heads = [SingleHeadAttention(n_dims, n_keys//n_heads) for _ in range(n_heads)]

#     def forward(self, X):
#         out = torch.cat([head(X) for head in self.heads], dim=-1)
#         return out

'''
The commented-out implementation above is the most intuitive one.
In practice, the whole multi-head attention computation can be fused into a
single matrix multiply: each head individually computes
X @ W = (n, n_keys // n_heads), so we can instead use one big W and obtain
X @ W = (n, n_keys) for all heads in one shot.
'''
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Rather than running ``n_heads`` independent single-head attentions, the
    Q/K/V projections for all heads are computed with one large linear layer
    each and then reshaped per head — mathematically equivalent but far more
    efficient.

    Args:
        n_dims: embedding (word-vector) dimension of the input.
        n_heads: number of attention heads.
        n_keys: total query/key/value dimension summed over all heads;
            must be divisible by ``n_heads``.
        mask: if True, apply a causal (lower-triangular) mask so each
            position attends only to itself and earlier positions.
    """

    def __init__(self, n_dims, n_heads, n_keys, mask=True) -> None:
        super().__init__()
        assert n_keys % n_heads == 0, "n_keys must be divisible by n_heads"
        self.n_keys = n_keys
        self.n_heads = n_heads
        self.mask = mask
        self.n_singlehead = n_keys // n_heads  # per-head Q/K/V dimension
        self.W_k = nn.Linear(n_dims, n_keys)
        self.W_q = nn.Linear(n_dims, n_keys)
        self.W_v = nn.Linear(n_dims, n_keys)
        # Final projection merging the concatenated heads back to n_dims.
        self.fullconnection = nn.Linear(n_keys, n_dims)

    def forward(self, X):
        """Apply multi-head self-attention.

        Args:
            X: input tensor of shape (batch_size, seq_len, n_dims).

        Returns:
            Tensor of shape (batch_size, seq_len, n_dims).
        """
        batch_size, seq_len = X.shape[0], X.shape[1]
        # Project to (batch, seq_len, n_keys), then split the last dimension
        # into heads: (batch, n_heads, seq_len, n_singlehead).
        key = self.W_k(X).view(batch_size, -1, self.n_heads, self.n_singlehead).transpose(1, 2)
        query = self.W_q(X).view(batch_size, -1, self.n_heads, self.n_singlehead).transpose(1, 2)
        value = self.W_v(X).view(batch_size, -1, self.n_heads, self.n_singlehead).transpose(1, 2)
        # Scale by sqrt(d_head) to keep the softmax input variance reasonable;
        # without it the softmax saturates towards a one-hot distribution.
        score = query @ key.transpose(-1, -2) / self.n_singlehead**0.5
        # score shape: (batch, n_heads, seq_len, seq_len)
        if self.mask:
            # Build the causal mask on the input's device (the original built
            # it on CPU, which breaks GPU inputs) and as bool for masked_fill.
            causal = torch.tril(
                torch.ones(seq_len, seq_len, dtype=torch.bool, device=X.device)
            )
            score = score.masked_fill(~causal, float('-inf'))
        # Out-of-place assignments here: the original used `score[:] = ...`
        # in-place copies, which are fragile under autograd.
        score = torch.softmax(score, dim=-1)
        out = score @ value
        # Merge heads: (batch, seq_len, n_keys), then project back to n_dims.
        out = out.transpose(1, 2).contiguous().view(batch_size, -1, self.n_keys)
        return self.fullconnection(out)

if __name__ == "__main__":
    # Smoke test: batch of 3 sequences, 5 tokens each, 5-dim embeddings.
    batch = torch.ones(3, 5, 5, dtype=torch.float)
    attention = MultiHeadAttention(batch.shape[2], 4, 16)
    result = attention(batch)
    print(result.shape)
    print(result)