import copy

import torch
import torch.nn as nn
from torch.nn import LayerNorm, Dropout

def clones(module, N):
    """Produce a ModuleList of N independent deep copies of `module`.

    A plain ``[module for _ in range(N)]`` would register the SAME module
    object N times, so every "copy" would share one set of parameters
    (e.g. both SublayerConnections in EncoderLayer would share a single
    LayerNorm).  ``copy.deepcopy`` gives each clone its own parameters.
    """
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

class SublayerConnection(nn.Module):
    """Residual connection followed by layer normalization (post-norm).

    Computes ``LayerNorm(x + Dropout(sublayer(x)))`` — the ordering used
    in the original "Attention Is All You Need" paper.  (The pre-norm
    variant ``x + Dropout(sublayer(LayerNorm(x)))`` is a common
    alternative but is not what this class does.)
    """

    def __init__(self, size, dropout):
        """`size` is the normalized feature dimension; `dropout` the drop probability."""
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply `sublayer` to x, then dropout, residual-add, and normalize."""
        residual = x + self.dropout(sublayer(x))
        return self.norm(residual)


class EncoderLayer(nn.Module):
    """One Transformer encoder layer.

    Applies self-attention, then a position-wise feed-forward network,
    each wrapped in its own residual + LayerNorm SublayerConnection.
    """

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # Two independent residual/norm wrappers, one per sublayer.
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size  # model (embedding) dimension, e.g. 512

    def forward(self, x, mask):
        """Run x through the attention sublayer, then the feed-forward sublayer."""
        attn_out = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
        return self.sublayer[1](attn_out, self.feed_forward)


if __name__ == "__main__":
    print("=== Encoder Layer 测试 ===")

    # Test hyperparameters.
    batch_size = 2
    seq_len = 10
    d_model = 512
    dropout = 0.1

    # Minimal position-wise feed-forward network used as a test stand-in.
    class SimpleFeedForward(nn.Module):
        def __init__(self, d_model, d_ff=2048, dropout=0.1):
            super(SimpleFeedForward, self).__init__()
            self.w_1 = nn.Linear(d_model, d_ff)
            self.w_2 = nn.Linear(d_ff, d_model)
            self.dropout = nn.Dropout(dropout)

        def forward(self, x):
            return self.w_2(self.dropout(torch.relu(self.w_1(x))))

    # Minimal "self-attention" stand-in for testing.
    class SimpleAttention(nn.Module):
        def __init__(self, d_model):
            super(SimpleAttention, self).__init__()
            self.linear = nn.Linear(d_model, d_model)

        def forward(self, query, key, value, mask=None):
            # Simplified attention: just a linear projection of the query;
            # key, value, and mask are ignored.
            return self.linear(query)

    # Build the modules under test.
    self_attn = SimpleAttention(d_model)
    feed_forward = SimpleFeedForward(d_model, dropout=dropout)
    encoder_layer = EncoderLayer(d_model, self_attn, feed_forward, dropout)

    # Create random test input and an all-ones (i.e. no-op) mask.
    x = torch.randn(batch_size, seq_len, d_model)
    mask = torch.ones(batch_size, seq_len).unsqueeze(-2)  # all-ones = nothing masked

    print(f"输入shape: {x.shape}")  # [2, 10, 512]
    print(f"mask shape: {mask.shape}")  # [2, 1, 10]

    # Exercise SublayerConnection on its own with an identity sublayer.
    print(f"\n--- 测试SublayerConnection ---")
    sublayer_conn = SublayerConnection(d_model, dropout)
    identity_func = lambda x: x  # identity function as the test sublayer
    sublayer_output = sublayer_conn(x, identity_func)
    print(f"SublayerConnection输出shape: {sublayer_output.shape}")  # [2, 10, 512]

    # Exercise the full encoder layer; output shape must match the input.
    print(f"\n--- 测试EncoderLayer ---")
    print("开始前向传播...")
    output = encoder_layer(x, mask)
    print(f"EncoderLayer输出shape: {output.shape}")  # [2, 10, 512]
    print(f"输入输出shape相同: {output.shape == x.shape}")

    # Verify that gradients flow back through the layer.
    loss = output.sum()
    loss.backward()
    print(f"梯度计算成功: {encoder_layer.self_attn.linear.weight.grad is not None}")

    # Display the module structure.
    print(f"\n--- 模块结构 ---")
    print(f"EncoderLayer包含:")
    print(f"  - 自注意力模块: {type(encoder_layer.self_attn).__name__}")
    print(f"  - 前馈网络: {type(encoder_layer.feed_forward).__name__}")
    print(f"  - 子层连接数量: {len(encoder_layer.sublayer)}")
    print(f"  - 模型维度: {encoder_layer.size}")

    print(f"\n=== 测试完成 ===")