import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Projects the input into queries, keys and values, splits each into
    ``heads`` independent heads, applies scaled dot-product attention per
    head, then merges the heads through a final linear projection.
    """

    def __init__(self, embed_size, heads):
        """
        :param embed_size: embedding dimension (d_model)
        :param heads: number of attention heads; must divide ``embed_size``
        :raises ValueError: if ``embed_size`` is not divisible by ``heads``
        """
        super(SelfAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads

        # Validate with an explicit raise: `assert` is stripped under -O.
        if self.head_dim * heads != embed_size:
            raise ValueError("Embedding size needs to be divisible by heads")

        # Linear projections producing Q, K, V (no bias, as in the
        # original Transformer formulation).
        self.values = nn.Linear(embed_size, embed_size, bias=False)
        self.keys = nn.Linear(embed_size, embed_size, bias=False)
        self.queries = nn.Linear(embed_size, embed_size, bias=False)

        # Final output linear transformation after head merge.
        self.fc_out = nn.Linear(embed_size, embed_size)

    def forward(self, x, mask=None):
        """
        :param x: input tensor [batch_size, seq_len, embed_size]
        :param mask: optional key-padding mask [batch_size, seq_len];
            positions that are 0/False are excluded from attention.
            Bool and 0/1 integer masks are both accepted.
        :return: output tensor [batch_size, seq_len, embed_size]
        """
        batch_size, seq_len, _ = x.shape

        # Project, then split the embedding into heads:
        # [batch_size, seq_len, heads, head_dim]
        values = self.values(x).view(batch_size, seq_len, self.heads, self.head_dim)
        keys = self.keys(x).view(batch_size, seq_len, self.heads, self.head_dim)
        queries = self.queries(x).view(batch_size, seq_len, self.heads, self.head_dim)

        # Bring the head dim forward for batched matmul:
        # [batch_size, heads, seq_len, head_dim]
        values = values.transpose(1, 2)
        keys = keys.transpose(1, 2)
        queries = queries.transpose(1, 2)

        # Scaled dot-product attention scores:
        # [batch_size, heads, seq_len(query), seq_len(key)]
        energy = torch.matmul(queries, keys.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Apply the mask, if given.
        if mask is not None:
            # Cast to bool first: `~` on an integer 0/1 tensor is bitwise
            # NOT (1 -> -2) and masked_fill requires a bool mask, so an
            # int mask would fail without this conversion.
            mask = mask.bool().unsqueeze(1).unsqueeze(2)  # [B, 1, 1, seq_len]
            energy = energy.masked_fill(~mask, -1e9)

        # Normalize scores over the key dimension.
        attention = F.softmax(energy, dim=-1)

        # Weighted sum of values: [B, H, seq_len, head_dim]
        out = torch.matmul(attention, values)

        # Merge heads: [B, seq_len, heads * head_dim] == [B, seq_len, embed_size]
        out = out.transpose(1, 2).contiguous()
        out = out.view(batch_size, seq_len, self.embed_size)

        # Final output projection.
        out = self.fc_out(out)

        return out


if __name__ == "__main__":
    # 可重复性
    torch.manual_seed(10)           # 设置 CPU 生成随机数的种子
    torch.cuda.manual_seed(10)      # 为当前 GPU 设置随机种子

    # 示例参数
    batch_size = 32
    seq_len = 20
    embed_size = 512
    heads = 8

    # 创建输入
    x = torch.randn(batch_size, seq_len, embed_size)

    # 初始化自注意力模块
    self_attention = SelfAttention(embed_size=embed_size, heads=heads)

    # 前向传播
    output = self_attention(x)

    print("Output shape:", output.shape)  # [32, 20, 512]
