import copy
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

def attention(query, key, value, mask=None, dropout=None):
    """Compute scaled dot-product attention.

    Args:
        query: query tensor, shape [batch, heads, q_len, d_k].
        key:   key tensor, shape [batch, heads, k_len, d_k].
        value: value tensor, shape [batch, heads, k_len, d_k].
        mask:  optional tensor broadcastable to [batch, heads, q_len, k_len];
               positions where it equals 0 are excluded from attention.
        dropout: optional dropout module applied to the attention weights.

    Returns:
        A tuple ``(output, weights)`` where ``output`` has shape
        [batch, heads, q_len, d_k] and ``weights`` has shape
        [batch, heads, q_len, k_len].
    """
    dim_per_head = query.size(-1)

    # Similarity of every query with every key, scaled by sqrt(d_k) to keep
    # the dot products in a numerically well-behaved range for softmax.
    scale = math.sqrt(dim_per_head)
    scores = query.matmul(key.transpose(-2, -1)) / scale

    # Masked-out positions receive a very large negative score so that the
    # softmax drives their weight to (effectively) zero.
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)

    # Normalize the scores into attention weights over the key dimension.
    weights = scores.softmax(dim=-1)

    # Optionally drop some attention weights (regularization).
    if dropout is not None:
        weights = dropout(weights)

    # The output is the attention-weighted sum of the value vectors.
    return weights.matmul(value), weights

def clones(module, N):
    """Produce N independent deep copies of a module.

    Args:
        module: the module to replicate.
        N: number of copies to make.

    Returns:
        nn.ModuleList: list holding N independent copies of ``module``.
    """
    # BUG FIX: the previous version inserted the *same* module object N
    # times, so every "copy" shared one set of parameters. deepcopy gives
    # each clone its own independently trained weights.
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

class MultiHeadedAttention(nn.Module):
    """Multi-headed attention: run ``h`` scaled dot-product attention heads
    in parallel over d_model-wide inputs and linearly combine the results.
    """

    def __init__(self, h, d_model, dropout=0.1):
        """
        Args:
            h: number of attention heads.
            d_model: model (embedding) dimension; must be divisible by h.
            dropout: dropout probability applied to the attention weights.
        """
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0, "d_model must be divisible by h"

        # We assume d_v == d_k == d_model / h (e.g. 512 // 8 = 64).
        self.d_k = d_model // h  # per-head dimension
        self.h = h               # number of heads

        # Four d_model -> d_model linear layers: three project Q, K and V,
        # the last one produces the final output.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None  # most recent attention weights, kept for inspection
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        """Apply multi-headed attention.

        Args:
            query, key, value: tensors of shape [batch, seq_len, d_model].
            mask: optional mask of shape [batch, 1, seq_len] or
                  [batch, seq_len, seq_len]; positions equal to 0 are masked.

        Returns:
            Tensor of shape [batch, seq_len, d_model].
        """
        if mask is not None:
            # Insert a head dimension so the same mask broadcasts to all heads:
            # [batch, 1, seq_len] -> [batch, 1, 1, seq_len].
            mask = mask.unsqueeze(1)

        nbatches = query.size(0)

        # 1) Project and reshape: [nb, seq, d_model] -> [nb, seq, h, d_k]
        #    -> transpose to [nb, h, seq, d_k] so each head attends separately.
        query, key, value = [
            lin(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
            for lin, x in zip(self.linears, (query, key, value))
        ]

        # 2) Scaled dot-product attention over all heads in one batch.
        #    x: [nb, h, seq, d_k]; self.attn: [nb, h, seq, seq].
        x, self.attn = attention(query, key, value, mask=mask,
                                 dropout=self.dropout)

        # 3) Re-concatenate the heads ([nb, h, seq, d_k] -> [nb, seq, h*d_k])
        #    and apply the final output projection.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)


if __name__ == "__main__":
    # Test configuration: a batch of 32 sequences, 15 tokens each,
    # model width 512 split across 8 attention heads.
    batch_size = 32
    num_tokens = 15
    model_dim = 512
    num_heads = 8

    # Random input tensor for a self-attention smoke test.
    inputs = torch.randn(batch_size, num_tokens, model_dim)
    print(f"原始输入张量 x shape: {inputs.shape}")

    # All-ones mask of shape [32, 1, 15]: no position is hidden.
    mask = torch.ones(batch_size, num_tokens).unsqueeze(-2)
    print(f"遮盖矩阵 mask shape: {mask.shape}")

    # Build the multi-headed attention module.
    model = MultiHeadedAttention(num_heads, model_dim)
    print(f"\n开始多头注意力计算...\n")

    # Self-attention: query, key and value are all the same tensor.
    result = model(inputs, inputs, inputs, mask)

    print(f"\n=== 最终结果 ===")
    print(f"输出shape: {result.shape}")
    print(f"输入输出shape相同: {result.shape == inputs.shape}")

    # Sanity-check that gradients flow back through the module.
    loss = result.sum()
    loss.backward()
    print(f"梯度计算成功: {model.linears[0].weight.grad is not None}")