import torch
import torch.nn as nn

"""
在PyTorch中，@ 是矩阵乘法运算符，等价于 torch.matmul() 函数。
PyTorch的@运算符遵循广播机制
当张量维度大于2时，会自动将前导维度视为批量维度
只对最后两维进行实际的矩阵乘法运算
"""

test_input = torch.randint(0, 10, (4, 8))


class MultiHeadAttention(nn.Module):
    def __init__(self, d_model=512, num_heads=8, dropout=0.1):
        super(MultiHeadAttention, self).__init__()
        assert d_model % num_heads == 0
        self.d_k = d_model // num_heads
        self.num_heads = num_heads

        self.W_q = nn.Linear(d_model, d_model)  # 查询矩阵
        self.W_k = nn.Linear(d_model, d_model)  # 键矩阵
        self.W_v = nn.Linear(d_model, d_model)  # 值矩阵

        self.W_o = nn.Linear(d_model, d_model)  # 输出矩阵

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        B, L, D = x.shape
        Q = self.W_q(x)
        K = self.W_k(x)
        V = self.W_v(x)

        # 分多头
        # Q.view(B, L, self.num_heads, self.d_k) - 将Q矩阵从(B, L, D)重塑为(B, L, num_heads, d_k)
        # .transpose(1, 2) - 交换维度1和2，最终形状变为(B, num_heads, L, d_k)
        Q = Q.view(B, L, self.num_heads, self.d_k).transpose(1, 2)
        K = K.view(B, L, self.num_heads, self.d_k).transpose(1, 2)
        V = V.view(B, L, self.num_heads, self.d_k).transpose(1, 2)

        # Scaled Dot-Product Attention
        # 1. 点积计算相似度
        # Q @ K.transpose(-2, -1) 计算查询(Q)和键(K)之间的点积
        # 点积值越大表示查询和键越相似
        # 这是注意力权重计算的基础
        # 2. 缩放因子的作用
        # /(self.d_k ** 0.5) 是缩放因子，用于防止点积值过大
        # 当d_k较大时，点积的方差会增大，导致softmax函数进入梯度很小的区域
        # 除以sqrt(d_k)可以稳定梯度，这在原始Transformer论文中有详细说明
        # 3. 维度分析
        # Q的形状: [B, num_heads, L, d_k]
        # K的形状: [B, num_heads, L, d_k]
        # K.transpose(-2, -1)的形状: [B, num_heads, d_k, L]
        # 点积结果scores的形状: [B, num_heads, L, L]
        scores = (Q @ K.transpose(-2, -1)) / (self.d_k ** 0.5)
        # 4. softmax归一化
        # torch.softmax(scores, dim=-1) 对最后一个维度进行softmax
        # 将注意力分数转换为概率分布，每行的和为1
        # 表示每个位置对其他位置的注意力权重
        attn = torch.softmax(scores, dim=-1)
        attn = self.dropout(attn)
        # 5. 加权求和
        # attn @ V 使用注意力权重对值(V)进行加权求和
        # 得到最终的注意力输出
        out = attn @ V  # [B, h, L, d_k]

        # 拼接 heads
        out = out.transpose(1, 2).reshape(B, L, D)
        out = self.W_o(out)
        return out


class Net(nn.Module):
    """Toy model: token embedding -> multi-head self-attention -> logits."""

    def __init__(self):
        super().__init__()
        # Vocabulary of 10 token ids, each embedded into a 512-dim vector.
        self.embedding = nn.Embedding(10, 512)
        self.multi_head_attention = MultiHeadAttention(512, 8, 0.1)
        self.linear = nn.Linear(512, 10)

    def forward(self, x):
        """Map a batch of token ids (B, L) to per-position logits (B, L, 10)."""
        embedded = self.embedding(x)
        attended = self.multi_head_attention(embedded)
        return self.linear(attended)


if __name__ == '__main__':
    # Smoke test: push the toy batch through the network and dump the logits.
    net = Net()
    logits = net(test_input)
    print(logits)
