import torch
import torch.nn as nn
import torch.nn.functional as F


# input  : (batch_size, seq_len, feature)
# output : (batch_size, seq_len, out_vocab)


# Decoder -> Generator (linear layer + softmax) -> outputs a probabilities vector.
# Mainly performs dimensionality reduction: maps the decoder's high-dimensional
# output vectors down to the length of the vocabulary.
class GeneratorLayer(nn.Module):
    """Final output projection: decoder features -> vocabulary log-probabilities.

    Input : (batch_size, seq_len, d_model)
    Output: (batch_size, seq_len, vocab) — log-probabilities over the last dim.
    """

    def __init__(self, d_model = 512, vocab = 1024):
        super().__init__()
        # Single linear map from the model dimension to the vocabulary size.
        self.linear = nn.Linear(d_model, vocab)

    def forward(self, x):
        # Project to vocab logits, then normalize in log-space.
        logits = self.linear(x)
        return F.log_softmax(logits, dim = -1)


if __name__ == "__main__":
    # Run on GPU when one is available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    generator = GeneratorLayer(512, 10).to(device)

    # Re-initialize all weight matrices with Xavier uniform;
    # 1-D tensors (biases) are left untouched.
    for param in generator.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)

    generator.eval()

    # Dummy input: batch_size=2, seq_len=10, feature dim d_model=512.
    x = torch.randn(2, 10, 512).to(device)

    out = generator(x)

    print(x)
    print(out)

    print("Input shape:", x.shape)
    print("Output shape:", out.shape)
