import torch
import torch.nn as nn
from transformers import BertModel, BertConfig


class BERT4Rec(nn.Module):
    """BERT4Rec: a BERT encoder over item-ID sequences for sequential recommendation.

    Each position of the encoder output is projected back onto the item
    vocabulary, producing per-position logits (suitable for masked-item
    prediction training).
    """

    def __init__(self, vocab_size, hidden_dim, num_layers, max_seq_length):
        """
        Args:
            vocab_size: Number of item IDs (including special tokens such as
                padding/mask, if the caller reserves them).
            hidden_dim: Transformer hidden size; must be divisible by the
                number of attention heads (4).
            num_layers: Number of Transformer encoder layers.
            max_seq_length: Maximum sequence length (size of the positional
                embedding table).
        """
        super().__init__()

        # Configure the underlying BERT encoder.
        # type_vocab_size=1: no sentence-A/B segments in recommendation data.
        config = BertConfig(
            vocab_size=vocab_size,
            hidden_size=hidden_dim,
            num_hidden_layers=num_layers,
            num_attention_heads=4,
            intermediate_size=hidden_dim * 2,
            max_position_embeddings=max_seq_length,
            type_vocab_size=1,
        )

        # BERT encoder backbone.
        self.bert = BertModel(config)

        # Per-position projection from hidden states to item-vocabulary logits.
        self.output_layer = nn.Linear(hidden_dim, vocab_size)

    def forward(self, input_ids, attention_mask=None):
        """Compute per-position item logits for a batch of ID sequences.

        Args:
            input_ids: LongTensor of shape (batch, seq_len) with item IDs.
            attention_mask: Optional tensor of shape (batch, seq_len) with 1
                for real tokens and 0 for padding. Without it, padded
                positions attend like real items and degrade predictions for
                variable-length sequences; pass it whenever inputs are padded.
                Defaults to None (all positions attended) for backward
                compatibility.

        Returns:
            FloatTensor of shape (batch, seq_len, vocab_size) with
            unnormalized logits for every position.
        """
        # Run the BERT encoder; forward the padding mask so padded
        # positions are excluded from self-attention.
        outputs = self.bert(input_ids, attention_mask=attention_mask)

        # Hidden states of the final encoder layer: (batch, seq_len, hidden_dim).
        last_hidden_state = outputs.last_hidden_state

        # Project each position onto the item vocabulary.
        logits = self.output_layer(last_hidden_state)
        return logits
