
import torch
import torch.nn as nn
import torch.nn.functional as F

# input  : (batch_size, seq_len, feature)
# output : (batch_size, seq_len, feature)

# Input (d_model)
#     │
#     ▼
# +------------+     +------+     +---------+     +------------+     +------------+
# | Linear     | --> | ReLU | --> | Dropout | --> | Linear     | --> | Output     |
# | d_model→d_ff|     +------+     +---------+     | d_ff→d_model|     | (d_model)  |
# +------------+                                   +------------+     +------------+

class FeedForwardLayer(nn.Module):
    """Position-wise feed-forward network (FFN) as used in Transformer blocks.

    Applies two linear transformations with a ReLU in between, independently
    at every sequence position:

        FFN(x) = Linear2(Dropout(ReLU(Linear1(x))))

    Input and output shapes are both ``(batch_size, seq_len, d_model)``.
    """

    def __init__(self, d_model: int = 512, d_ff: int = 2048, dropout: float = 0.1):
        """
        Args:
            d_model: Input/output feature dimension.
            d_ff: Hidden dimension of the inner (expanded) layer.
            dropout: Dropout probability applied after the ReLU.
        """
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ff)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ff, d_model)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Expand to ``d_ff``, apply ReLU and dropout, project back to ``d_model``.

        Args:
            x: Tensor of shape ``(batch_size, seq_len, d_model)``.

        Returns:
            Tensor of the same shape as ``x``.
        """
        return self.linear2(self.dropout(F.relu(self.linear1(x))))


def _demo() -> None:
    """Smoke test: push one random batch through the layer and print shapes."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    ffn = FeedForwardLayer().to(device)

    # Re-initialize all weight matrices with Xavier-uniform; 1-D params
    # (biases) are skipped on purpose.
    for param in ffn.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)

    ffn.eval()

    # Random demo input: batch_size=2, seq_len=10, feature dim d_model=512.
    sample = torch.randn(2, 10, 512).to(device)
    result = ffn(sample)

    print(sample)
    print(result)

    print("Input shape:", sample.shape)
    print("Output shape:", result.shape)


if __name__ == "__main__":
    _demo()

