import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttentionV1(nn.Module):
    """Single-head scaled dot-product self-attention.

    Q, K and V are produced by one fused linear projection
    (a single matmul instead of three) for efficiency.
    """

    def __init__(self, dim):
        """
        Args:
            dim: feature dimension of the input (and output).
        """
        super().__init__()
        self.dim = dim
        # Fused projection: computes Q, K and V together.
        self.fcl = nn.Linear(dim, dim * 3)

    def forward(self, X, attn_mask=None):
        """Apply self-attention to a batch of sequences.

        Args:
            X: input tensor of shape (batch_size, seq_len, dim).
            attn_mask: optional mask broadcastable to
                (batch_size, seq_len, seq_len); positions where the mask
                is 0 are excluded from attention. Each query row is
                assumed to have at least one unmasked position (a fully
                masked row would produce NaNs from the softmax).
                Defaults to None (no masking), preserving the original
                behavior for existing callers.

        Returns:
            Tensor of shape (batch_size, seq_len, dim).
        """
        QKV = self.fcl(X)
        Q, K, V = torch.split(QKV, self.dim, dim=-1)
        # Scale by sqrt(dim) to keep the logits' variance stable.
        scores = torch.matmul(Q, K.transpose(-1, -2)) / math.sqrt(self.dim)
        if attn_mask is not None:
            # -inf logits become exactly 0 after the softmax.
            scores = scores.masked_fill(attn_mask == 0, float("-inf"))
        atten = F.softmax(scores, dim=-1)
        output = torch.matmul(atten, V)
        return output

def _demo():
    """Run the attention module on a random batch and print the result."""
    # (batch_size, seq_len, dim)
    X = torch.rand(2, 16, 8)
    self_attention = SelfAttentionV1(8)
    output = self_attention(X)
    print(output)


# Guard the demo so importing this module has no side effects.
if __name__ == "__main__":
    _demo()