import torch
import torch.nn as nn
import torch.nn.functional as F

class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward network from the Transformer architecture.

    Applies the same two-layer MLP independently at every position:
    ``FFN(x) = W2(dropout(relu(W1(x))))``. The outer dimension is kept at
    ``d_model`` so the block can be stacked with residual connections.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        """
        Args:
            d_model: feature size of both the input and the output — kept
                equal so dimensions are unchanged across the block.
            d_ff: hidden size of the inner (expansion) linear layer.
            dropout: zeroing probability applied after the ReLU activation.
        """
        super().__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Project up to d_ff, apply ReLU and dropout, then project back.

        Args:
            x: tensor whose last dimension is ``d_model`` (output of the
                previous layer).

        Returns:
            Tensor of the same shape as ``x``.
        """
        hidden = F.relu(self.w_1(x))
        hidden = self.dropout(hidden)
        return self.w_2(hidden)


def main():
    """Smoke-test PositionwiseFeedForward: shapes, structure, gradients.

    Exercises the module on several batch shapes, inspects each internal
    stage's output dimensions, and verifies that gradients reach both
    linear layers. Prints results; returns nothing.
    """
    print("=== 位置前馈网络测试 ===")

    # Test hyperparameters (Transformer-base sizes: 512 -> 2048 -> 512).
    batch_size = 2
    seq_len = 10
    d_model = 512
    d_ff = 2048
    dropout = 0.1

    # Build the feed-forward network under test.
    ff_network = PositionwiseFeedForward(d_model, d_ff, dropout)

    # Random input of shape (batch, seq_len, d_model).
    x = torch.randn(batch_size, seq_len, d_model)
    print(f"输入张量shape: {x.shape}")  # [2, 10, 512]

    # Forward pass: output shape must equal input shape.
    print("\n--- 前向传播测试 ---")
    output = ff_network(x)
    print(f"输出张量shape: {output.shape}")  # [2, 10, 512]
    print(f"输入输出shape相同: {output.shape == x.shape}")

    # Describe the network structure.
    print("\n--- 网络结构 ---")
    print(f"第一层线性变换: {d_model} → {d_ff}")
    print(f"第二层线性变换: {d_ff} → {d_model}")
    print("激活函数: ReLU")
    print(f"Dropout概率: {dropout}")

    # Trace the output dimension after each internal stage.
    # no_grad: pure inspection, no autograd bookkeeping needed.
    print("\n--- 各层输出维度 ---")
    with torch.no_grad():
        w1_output = ff_network.w_1(x)
        print(f"第一层线性变换后: {w1_output.shape}")  # [2, 10, 2048]

        relu_output = F.relu(w1_output)
        print(f"ReLU激活后: {relu_output.shape}")  # [2, 10, 2048]

        dropout_output = ff_network.dropout(relu_output)
        print(f"Dropout后: {dropout_output.shape}")  # [2, 10, 2048]

        final_output = ff_network.w_2(dropout_output)
        print(f"第二层线性变换后: {final_output.shape}")  # [2, 10, 512]

    # Backward pass: both layers' weights should receive gradients.
    print("\n--- 梯度计算测试 ---")
    loss = output.sum()
    loss.backward()
    print(f"第一层权重梯度存在: {ff_network.w_1.weight.grad is not None}")
    print(f"第二层权重梯度存在: {ff_network.w_2.weight.grad is not None}")

    # The module should accept any leading (batch, seq) shape.
    print("\n--- 不同维度测试 ---")
    single_x = torch.randn(1, 5, d_model)
    single_output = ff_network(single_x)
    print(f"单样本输入: {single_x.shape} → 输出: {single_output.shape}")

    large_batch_x = torch.randn(32, 20, d_model)
    large_batch_output = ff_network(large_batch_x)
    print(f"大批次输入: {large_batch_x.shape} → 输出: {large_batch_output.shape}")

    print("\n=== 测试完成 ===")


if __name__ == "__main__":
    main()