import copy

import torch
import torch.nn as nn

def clones(module, N):
    """Produce N independent deep copies of *module* in an nn.ModuleList.

    The previous implementation reused the same module object N times, so
    every "copy" shared one set of parameters. Deep-copying (as in the
    standard annotated-Transformer `clones` helper) gives each layer its
    own independent weights.

    Args:
        module: the nn.Module to replicate.
        N: number of independent copies to create.

    Returns:
        nn.ModuleList holding N independent copies of ``module``.
    """
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

class SublayerConnection(nn.Module):
    """Residual connection wrapped around an arbitrary sub-layer.

    Computes ``x + dropout(sublayer(LayerNorm(x)))`` — note the norm is
    applied *before* the sub-layer (the "pre-norm" arrangement).
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        # Normalizes over the last dimension, which must have width `size`.
        self.norm = nn.LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply the pre-norm residual connection to `sublayer` (a callable)."""
        normalized = self.norm(x)
        transformed = sublayer(normalized)
        return x + self.dropout(transformed)

class DecoderLayer(nn.Module):
    """One decoder layer: masked self-attention, encoder-decoder attention,
    and a position-wise feed-forward network, each wrapped in its own
    SublayerConnection (residual + layer norm)."""

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        """
        Args:
            size: model dimension (d_model), also the LayerNorm width.
            self_attn: attention module applied over the target sequence.
            src_attn: attention module attending over the encoder memory.
            feed_forward: position-wise feed-forward module.
            dropout: dropout rate used in every sublayer connection.
        """
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        # Three sublayer connections, built via clones() as in the diagram.
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Run the three sub-layers in order (Figure 1, right).

        Args:
            x: decoder-side input from the previous layer.
            memory: encoder output ("m" below, for brevity).
            src_mask: mask tensor over the source sequence.
            tgt_mask: mask tensor over the target sequence.
        """
        m = memory
        # 1) self-attention over the target sequence (query = key = value).
        x = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, tgt_mask))
        # 2) source attention: queries from the decoder, keys/values from memory.
        x = self.sublayer[1](x, lambda t: self.src_attn(t, m, m, src_mask))
        # 3) position-wise feed-forward network.
        return self.sublayer[2](x, self.feed_forward)


if __name__ == "__main__":
    # --- Demo / smoke test for the decoder-layer components defined above ---
    print("=== Decoder Layer 测试 ===")

    # Test parameters
    batch_size = 2
    src_seq_len = 8   # source sequence length
    tgt_seq_len = 6   # target sequence length
    d_model = 512
    dropout = 0.1

    # Minimal attention stand-in for testing: ignores key/value/mask and
    # only projects the query, so output shapes match the query's shape.
    class SimpleAttention(nn.Module):
        def __init__(self, d_model):
            super(SimpleAttention, self).__init__()
            self.linear = nn.Linear(d_model, d_model)

        def forward(self, query, key, value, mask=None):
            # Simplified attention mechanism: just a linear projection.
            return self.linear(query)

    # Minimal position-wise feed-forward net: Linear -> ReLU -> Dropout -> Linear.
    class SimpleFeedForward(nn.Module):
        def __init__(self, d_model, d_ff=2048, dropout=0.1):
            super(SimpleFeedForward, self).__init__()
            self.w_1 = nn.Linear(d_model, d_ff)
            self.w_2 = nn.Linear(d_ff, d_model)
            self.dropout = nn.Dropout(dropout)

        def forward(self, x):
            return self.w_2(self.dropout(torch.relu(self.w_1(x))))

    # Instantiate the modules under test.
    self_attn = SimpleAttention(d_model)
    src_attn = SimpleAttention(d_model)
    feed_forward = SimpleFeedForward(d_model, dropout=dropout)
    decoder_layer = DecoderLayer(d_model, self_attn, src_attn, feed_forward, dropout)

    # Build random test tensors.
    x = torch.randn(batch_size, tgt_seq_len, d_model)  # target sequence
    memory = torch.randn(batch_size, src_seq_len, d_model)  # encoder output
    src_mask = torch.ones(batch_size, src_seq_len).unsqueeze(-2)  # source mask, (batch, 1, src_len)
    tgt_mask = torch.ones(batch_size, tgt_seq_len).unsqueeze(-2)  # target mask, (batch, 1, tgt_len)

    print(f"输入张量 shapes:")
    print(f"  目标序列 x: {x.shape}")  # [2, 6, 512]
    print(f"  编码器记忆 memory: {memory.shape}")  # [2, 8, 512]
    print(f"  源序列mask: {src_mask.shape}")  # [2, 1, 8]
    print(f"  目标序列mask: {tgt_mask.shape}")  # [2, 1, 6]

    # Test the sublayer connection in isolation.
    print(f"\n--- 测试SublayerConnection ---")
    sublayer_conn = SublayerConnection(d_model, dropout)
    identity_func = lambda x: x  # identity function as a trivial sub-layer
    sublayer_output = sublayer_conn(x, identity_func)
    print(f"SublayerConnection输出shape: {sublayer_output.shape}")  # [2, 6, 512]

    # Test the full decoder layer.
    print(f"\n--- 测试DecoderLayer ---")
    print("开始前向传播...")
    print("第一个子层: 自注意力 (目标序列内部的注意力)")
    print("第二个子层: 源注意力 (目标序列对编码器输出的注意力)")
    print("第三个子层: 前馈网络")

    output = decoder_layer(x, memory, src_mask, tgt_mask)
    print(f"DecoderLayer输出shape: {output.shape}")  # [2, 6, 512]
    print(f"输入输出shape相同: {output.shape == x.shape}")

    # Verify that gradients flow into all three sub-modules.
    print(f"\n--- 梯度计算测试 ---")
    loss = output.sum()
    loss.backward()
    print(f"自注意力模块梯度存在: {decoder_layer.self_attn.linear.weight.grad is not None}")
    print(f"源注意力模块梯度存在: {decoder_layer.src_attn.linear.weight.grad is not None}")
    print(f"前馈网络模块梯度存在: {decoder_layer.feed_forward.w_1.weight.grad is not None}")

    # Display the module structure.
    print(f"\n--- 模块结构分析 ---")
    print(f"DecoderLayer包含:")
    print(f"  - 自注意力模块: {type(decoder_layer.self_attn).__name__}")
    print(f"  - 源注意力模块: {type(decoder_layer.src_attn).__name__}")
    print(f"  - 前馈网络: {type(decoder_layer.feed_forward).__name__}")
    print(f"  - 子层连接数量: {len(decoder_layer.sublayer)}")
    print(f"  - 模型维度: {decoder_layer.size}")

    # Exercise the three sub-layers independently.
    # NOTE(review): torch.no_grad() disables autograd only — the Dropout
    # modules remain in training mode here, so each call draws fresh random
    # dropout masks. The step-by-step result will therefore generally NOT
    # reproduce `output` from above, and the allclose check below is
    # expected to print False unless decoder_layer.eval() is called first —
    # confirm whether that is the intent.
    print(f"\n--- 三个子层独立测试 ---")
    with torch.no_grad():
        # First sub-layer: self-attention
        x1 = decoder_layer.sublayer[0](x, lambda x: decoder_layer.self_attn(x, x, x, tgt_mask))
        print(f"第一子层(自注意力)输出shape: {x1.shape}")

        # Second sub-layer: source (encoder-decoder) attention
        x2 = decoder_layer.sublayer[1](x1, lambda x: decoder_layer.src_attn(x, memory, memory, src_mask))
        print(f"第二子层(源注意力)输出shape: {x2.shape}")

        # Third sub-layer: feed-forward network
        x3 = decoder_layer.sublayer[2](x2, decoder_layer.feed_forward)
        print(f"第三子层(前馈网络)输出shape: {x3.shape}")

        # Compare the step-by-step result with the full forward pass.
        print(f"分步计算与完整forward一致: {torch.allclose(x3, output, atol=1e-6)}")

    # Test with different sequence lengths.
    print(f"\n--- 不同序列长度测试 ---")
    # Longer sequences
    x_long = torch.randn(1, 12, d_model)
    memory_long = torch.randn(1, 15, d_model)
    src_mask_long = torch.ones(1, 15).unsqueeze(-2)
    tgt_mask_long = torch.ones(1, 12).unsqueeze(-2)

    output_long = decoder_layer(x_long, memory_long, src_mask_long, tgt_mask_long)
    print(f"长序列测试 - 输入: {x_long.shape} → 输出: {output_long.shape}")

    print(f"\n=== 测试完成 ===")
    print(f"\nDecoderLayer总结:")
    print(f"- 包含三个子层：自注意力、源注意力(编码器-解码器注意力)、前馈网络")
    print(f"- 每个子层都有残差连接和层归一化")
    print(f"- 支持不同长度的源序列和目标序列")
    print(f"- 通过mask机制控制注意力范围")