import torch
import torch.nn as nn
import torch.nn.functional as F
 
class TransformerBlock(nn.Module):
    """Post-norm Transformer encoder layer.

    Multi-head self-attention followed by a position-wise feed-forward
    network; each sub-layer is wrapped in dropout, a residual connection,
    and LayerNorm. Input and output are shaped (seq_len, batch, embed_dim)
    (the nn.MultiheadAttention default layout).
    """

    def __init__(self, embed_dim, num_heads, dropout=0.1):
        """
        Args:
            embed_dim: model / embedding dimension.
            num_heads: number of attention heads (must divide embed_dim).
            dropout: dropout probability for attention and sub-layer outputs.
        """
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
        # Standard 4x expansion in the feed-forward sub-layer.
        self.ffn = nn.Sequential(
            nn.Linear(embed_dim, embed_dim * 4),
            nn.GELU(),
            nn.Linear(embed_dim * 4, embed_dim),
        )
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply one encoder layer to x of shape (seq_len, batch, embed_dim)."""
        # Self-attention sub-layer: query = key = value = x,
        # then dropout -> residual add -> LayerNorm (post-norm).
        attended, _ = self.attn(x, x, x)
        x = self.norm1(x + self.dropout(attended))

        # Feed-forward sub-layer with the same dropout/residual/norm wrap.
        transformed = self.ffn(x)
        return self.norm2(x + self.dropout(transformed))
 
class TextTransformer(nn.Module):
    """Transformer encoder for classifying token-id sequences.

    Embeds tokens, adds a learned positional encoding, runs a stack of
    TransformerBlock layers, and produces logits from the final position's
    representation.
    """

    def __init__(self, vocab_size, embed_dim, num_heads, num_layers,
                 dropout=0.1, max_len=512):
        """
        Args:
            vocab_size: number of distinct token ids (also the output size).
            embed_dim: embedding / model dimension.
            num_heads: attention heads per encoder layer.
            num_layers: number of stacked TransformerBlock layers.
            dropout: dropout probability inside each block.
            max_len: maximum supported sequence length for the learned
                positional encoding (new, backward-compatible parameter).
        """
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # BUG FIX: the original allocated torch.randn(1, 1, embed_dim) — a
        # single vector broadcast identically to every position, so the model
        # carried no positional information. Learn one vector per position.
        self.positional_encoding = nn.Parameter(torch.randn(1, max_len, embed_dim))
        self.encoder = nn.Sequential(
            *[TransformerBlock(embed_dim, num_heads, dropout) for _ in range(num_layers)]
        )
        self.fc_out = nn.Linear(embed_dim, vocab_size)  # classification head

    def forward(self, x):
        """Classify each sequence in x.

        Args:
            x: LongTensor of token ids, shape (batch_size, seq_len);
               seq_len must not exceed max_len.

        Returns:
            Logits of shape (batch_size, vocab_size), computed from the
            last position's encoder output.
        """
        seq_len = x.size(1)
        if seq_len > self.positional_encoding.size(1):
            raise ValueError(
                f"sequence length {seq_len} exceeds max_len "
                f"{self.positional_encoding.size(1)}"
            )

        # Token embeddings plus per-position learned encoding.
        embeds = self.embedding(x)  # (batch_size, seq_len, embed_dim)
        embeds = embeds + self.positional_encoding[:, :seq_len, :]
        # nn.MultiheadAttention expects (seq_len, batch_size, embed_dim).
        embeds = embeds.transpose(0, 1)

        out = self.encoder(embeds)

        # Back to batch-first, then classify from the last time step only.
        out = out.transpose(0, 1)  # (batch_size, seq_len, embed_dim)
        out = self.fc_out(out[:, -1, :])

        return out
 
# Model hyperparameters for the demo run.
vocab_size = 10000  # vocabulary size (also the number of output classes)
embed_dim = 256     # embedding / model dimension
num_heads = 8       # attention heads per layer
num_layers = 6      # number of stacked Transformer layers

# Guard the executable demo so importing this module has no side effects
# (the original built the model and ran a forward pass at import time).
if __name__ == "__main__":
    # Instantiate the model.
    model = TextTransformer(vocab_size, embed_dim, num_heads, num_layers)

    # Random batch of token ids: (batch_size=32, seq_len=100).
    input_seq = torch.randint(0, vocab_size, (32, 100))

    # Single forward pass; expect logits of shape (batch_size, vocab_size).
    output = model(input_seq)
    print(output.shape)  # torch.Size([32, 10000])