import torch
import torch.nn as nn
import torch.nn.functional as F

# A hypothetical, simplified version of a BERT model (for illustration only)
class SimplifiedBERT(nn.Module):
    """Minimal BERT-style sequence classifier (single encoder layer, for demos).

    Pipeline: token ids -> embeddings -> one TransformerEncoderLayer ->
    mean-pool over the sequence -> linear head -> log-probabilities.

    Args:
        vocab_size: size of the token vocabulary.
        hidden_size: embedding / model dimension (must be divisible by nhead=8).
        num_classes: number of output classes.
    """

    def __init__(self, vocab_size, hidden_size, num_classes):
        super(SimplifiedBERT, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, hidden_size)
        # A real BERT stacks many encoder layers; one suffices for this example.
        # batch_first=True so the layer consumes [batch, seq, hidden] directly,
        # matching the embedding output layout.
        self.transformer = nn.TransformerEncoderLayer(
            d_model=hidden_size, nhead=8, batch_first=True
        )
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Classify a batch of token-id sequences.

        Args:
            x: LongTensor of shape [batch_size, seq_length] with token ids.

        Returns:
            Tensor of shape [batch_size, num_classes] with log-probabilities
            (log_softmax output, suitable for nn.NLLLoss).
        """
        x = self.embeddings(x)  # [batch_size, seq_length, hidden_size]
        # NOTE(review): positional encodings are assumed to be added upstream.
        # Bug fix: x already carries a batch dimension. The old code did
        # x.unsqueeze(0), feeding a 4-D tensor to a layer that expects 3-D
        # input; with batch_first=True the embedded batch is passed as-is.
        output = self.transformer(x)  # [batch_size, seq_length, hidden_size]
        output = torch.mean(output, dim=1)  # mean-pool the sequence -> sentence repr
        output = self.fc(output)
        return F.log_softmax(output, dim=1)  # pairs with nn.NLLLoss

# Instantiate the model; hyperparameters gathered in one place for easy tweaking.
_MODEL_CONFIG = {"vocab_size": 10000, "hidden_size": 768, "num_classes": 2}
model = SimplifiedBERT(**_MODEL_CONFIG)
