import torch
import torch.nn as nn

from feed_forward import PositionwiseFeedForward

# Dummy batch of token ids: shape (4, 8) = (batch, seq_len), values drawn
# uniformly from [0, 10) to match the Embedding(10, 512) vocabulary in FFNBlock.
test_input = torch.randint(0, 10, (4, 8))


class AddNormPost(nn.Module):
    """Post-norm residual wrapper: y = LayerNorm(x + Dropout(Sublayer(x))).

    The sublayer is typically multi-head attention or a position-wise
    feed-forward network; normalizing the feature dimension after the
    residual addition stabilizes training and speeds up convergence.
    """

    def __init__(self, d_model=512, dropout=0.1):
        """
        Args:
            d_model: size of the last (feature) dimension; LayerNorm
                normalizes over this dimension independently per position.
            dropout: dropout probability applied to the sublayer output.
        """
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply ``sublayer``, add the residual, then layer-normalize.

        Args:
            x: tensor of shape [B, L, d_model].
            sublayer: callable taking ``x`` and returning a tensor of the
                same shape.

        Returns:
            Tensor of shape [B, L, d_model].
        """
        residual = self.dropout(sublayer(x))
        return self.norm(x + residual)


class FFNBlock(nn.Module):
    """
    FeedForward + Add & Norm: embeds integer token ids, then applies the
    position-wise feed-forward sublayer wrapped in a post-norm residual
    connection.
    """

    def __init__(self):
        super(FFNBlock, self).__init__()
        # Vocabulary of 10 token ids, embedding (model) dimension 512.
        self.embedding = nn.Embedding(10, 512)
        # NOTE(review): misleadingly named — this holds the position-wise
        # feed-forward network (d_model=512, d_ff=2048, dropout=0.1), not a
        # positional encoding. Renaming would change state_dict keys, so it
        # is flagged here rather than changed.
        self.position_encoding = PositionwiseFeedForward(512, 2048, 0.1)
        self.add_norm = AddNormPost(512, 0.1)

    def forward(self, x):
        """
        x: [B, L] integer token ids in [0, 10)
        returns: [B, L, 512]
        """
        x = self.embedding(x)
        # Post-norm residual around the feed-forward sublayer:
        # LayerNorm(x + Dropout(FFN(x)))
        x = self.add_norm(x, self.position_encoding)
        return x


if __name__ == '__main__':
    # Smoke test: push the random token batch through the block and show
    # the resulting [4, 8, 512] activations.
    block = FFNBlock()
    output = block(test_input)
    print(output)
