import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class SelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Projects the input into per-head query/key/value spaces, computes
    scaled dot-product attention within each head, then concatenates the
    heads and applies a final output projection.
    """

    def __init__(self, embed_size, heads):
        """
        :param embed_size: embedding dimension (d_model)
        :param heads: number of attention heads; must divide embed_size evenly
        """
        super(SelfAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads  # per-head feature dimension

        assert (
            self.head_dim * heads == embed_size
        ), "Embedding size needs to be divisible by heads"

        # Linear projections producing Q, K, V (bias-free, as in the
        # standard Transformer formulation)
        self.w_q = nn.Linear(embed_size, embed_size, bias=False)
        self.w_k = nn.Linear(embed_size, embed_size, bias=False)
        self.w_v = nn.Linear(embed_size, embed_size, bias=False)

        # Output projection applied after the heads are concatenated
        self.w_o = nn.Linear(embed_size, embed_size)


    def forward(self, x: torch.Tensor, mask: torch.Tensor = None):
        """
        :param x: input tensor [batch_size, seq_len, embed_size]
        :param mask: optional mask; positions where ``mask == 0`` are
            excluded from attention. Accepts [batch_size, seq_len]
            (broadcast over heads and query positions) or any shape
            broadcastable to [batch_size, heads, seq_len, seq_len].
        :return: output tensor [batch_size, seq_len, embed_size]
        """
        batch_size, seq_len, _ = x.shape

        # Project and split into heads:
        # [batch, seq, embed] -> [batch, seq, heads, head_dim]
        q = self.w_q(x).view(batch_size, seq_len, self.heads, self.head_dim)
        k = self.w_k(x).view(batch_size, seq_len, self.heads, self.head_dim)
        v = self.w_v(x).view(batch_size, seq_len, self.heads, self.head_dim)
        # [batch, seq, heads, head_dim] -> [batch, heads, seq, head_dim]
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)

        # Scaled dot-product attention logits
        # weight: [batch, heads, seq_len, seq_len]
        weight = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        # Fix: `mask` was previously accepted but never applied.
        # Masked positions get -inf logits so softmax assigns them zero weight.
        if mask is not None:
            if mask.dim() == 2:
                # [batch, seq] -> [batch, 1, 1, seq]; broadcasts over heads
                # and query positions (masks out entire key positions)
                mask = mask.unsqueeze(1).unsqueeze(2)
            weight = weight.masked_fill(mask == 0, float("-inf"))

        # Normalize logits into attention probabilities
        attention = F.softmax(weight, dim=-1)

        # Attention-weighted sum of values
        # score: [batch, heads, seq_len, head_dim]
        score = torch.matmul(attention, v)

        # Merge the heads back together
        # score: [batch, seq_len, embed_size]
        score = score.transpose(1, 2).contiguous()
        score = score.view(batch_size, seq_len, self.embed_size)

        # Final linear projection
        out = self.w_o(score)

        return out


if __name__ == "__main__":
    # Reproducibility: seed CPU RNG and the current CUDA device (no-op without a GPU)
    torch.manual_seed(10)
    torch.cuda.manual_seed(10)

    # Demo configuration
    cfg = {"batch_size": 32, "seq_len": 20, "embed_size": 512, "heads": 8}

    # Random input batch: [batch_size, seq_len, embed_size]
    inputs = torch.randn(cfg["batch_size"], cfg["seq_len"], cfg["embed_size"])

    # Build the self-attention module and run a forward pass
    attention = SelfAttention(embed_size=cfg["embed_size"], heads=cfg["heads"])
    output = attention(inputs)

    print("Output shape:", output.shape)  # [32, 20, 512]