import torch.nn as nn


class LSTMClassifier(nn.Module):
    """LSTM-based sequence classifier.

    Embeds token ids, runs them through a (possibly multi-layer) LSTM, and
    projects the last layer's final hidden state to class logits.

    Args:
        vocab_size: Size of the vocabulary (rows of the embedding table).
        embed_size: Dimensionality of token embeddings.
        hidden_size: LSTM hidden state size.
        output_size: Number of output classes (logit dimension).
        num_layers: Number of stacked LSTM layers.
        dropout: Dropout probability. Used both for inter-layer LSTM dropout
            (only effective when num_layers > 1; PyTorch warns otherwise) and
            for dropout on the final hidden state before the classifier head.
        embedding_matrix: Optional pre-trained embedding weights of shape
            (vocab_size, embed_size); fine-tuned during training (freeze=False).
    """

    def __init__(self, vocab_size, embed_size, hidden_size, output_size, num_layers, dropout, embedding_matrix=None):
        super(LSTMClassifier, self).__init__()

        # If a pre-trained embedding matrix is provided, initialize the
        # Embedding layer from it; otherwise fall back to random init.
        if embedding_matrix is not None:
            self.embedding = nn.Embedding.from_pretrained(embedding_matrix, freeze=False)
        else:
            self.embedding = nn.Embedding(vocab_size, embed_size, padding_idx=0)

        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Compute class logits for a batch of token-id sequences.

        Args:
            x: LongTensor of shape (batch, seq_len) containing token ids.

        Returns:
            Tensor of shape (batch, output_size) with unnormalized logits.
        """
        x = self.embedding(x)                  # (batch, seq_len, embed_size)
        lstm_out, (hn, cn) = self.lstm(x)
        # hn[-1] is the final hidden state of the top LSTM layer.
        # Fix: the dropout layer was previously created but never applied;
        # apply it here so the configured dropout actually regularizes the head.
        out = self.fc(self.dropout(hn[-1]))
        return out

