from torch import nn
from transformers import BertForSequenceClassification


class LSTMClassifier(nn.Module):
    """Unidirectional LSTM text classifier: embedding -> stacked LSTM -> linear head.

    Expects a LongTensor of token ids shaped [batch_size, seq_len] and
    returns unnormalized class logits shaped [batch_size, num_classes].
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes, padding_idx, num_layers, dropout_rate):
        """
        Args:
            vocab_size: number of tokens in the vocabulary.
            embed_dim: dimensionality of the token embeddings.
            hidden_dim: LSTM hidden-state size.
            num_classes: number of output classes.
            padding_idx: token id whose embedding stays fixed at zeros.
            num_layers: number of stacked LSTM layers.
            dropout_rate: dropout probability, applied between LSTM layers
                and on the final hidden state before the classifier head.
        """
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        self.lstm = nn.LSTM(
            embed_dim,
            hidden_dim,
            batch_first=True,
            num_layers=num_layers,
            bidirectional=False,
            dropout=dropout_rate,
        )
        # Fix: the original forward comment claimed dropout was applied before the
        # classifier, but none was; make it explicit. This is a no-op when
        # dropout_rate == 0 (the default used by build_lstm) or in eval mode.
        self.dropout = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        """Map token ids [batch_size, seq_len] to class logits [batch_size, num_classes]."""
        embeds = self.embedding(x)  # [batch_size, seq_len, embed_dim]
        lstm_out, (hn, cn) = self.lstm(embeds)
        # hn[-1] is the final hidden state of the topmost LSTM layer: [batch_size, hidden_dim]
        output = self.fc(self.dropout(hn[-1]))
        return output

    
def build_lstm(vocab_size, padding_idx, num_classes):
    """Construct an LSTMClassifier with this project's default hyper-parameters.

    Defaults: 128-dim embeddings, 512-dim hidden state, 2 stacked layers,
    no dropout.
    """
    return LSTMClassifier(
        vocab_size=vocab_size,
        embed_dim=128,
        hidden_dim=512,
        num_classes=num_classes,
        padding_idx=padding_idx,
        num_layers=2,
        dropout_rate=0.0,
    )

def build_bert(vocab_size, padding_idx, num_classes, max_size):
    """Load a Yelp-polarity-pretrained BERT classifier adapted to this task.

    Overrides the checkpoint's vocabulary size, padding token, label count
    and maximum sequence length, and sets a 0.15 classifier dropout.
    """
    config_overrides = {
        "vocab_size": vocab_size,
        "pad_token_id": padding_idx,
        "num_labels": num_classes,
        "max_position_embeddings": max_size,
        "classifier_dropout": 0.15,
    }
    # ignore_mismatched_sizes: weights whose shapes no longer match the
    # overridden config are re-initialized instead of raising an error.
    return BertForSequenceClassification.from_pretrained(
        "textattack/bert-base-uncased-yelp-polarity",
        ignore_mismatched_sizes=True,
        **config_overrides,
    )