from torch import nn  # NOTE(review): wrong library for this file — torch.nn has no `nn.Layer`; shadowed below

import paddle
import paddle.nn as nn  # this model is Paddle code: nn.Layer / nn.Embedding / nn.Linear come from paddle.nn
import paddlenlp as nlp

class LSTMModel(nn.Layer):
    """LSTM text classifier: embedding -> seq2vec LSTM encoder -> FC (tanh) -> logits.

    Output size of ``forward`` is ``num_classes`` (raw logits, no softmax).
    """

    def __init__(self,
                 vocab_size,
                 num_classes,
                 emb_dim=128,
                 padding_idx=0,
                 lstm_hidden_size=198,
                 direction='forward',
                 lstm_layers=1,
                 dropout_rate=0.0,
                 pooling_type=None,
                 fc_hidden_size=96):
        super().__init__()
        # Token-embedding table: one `emb_dim`-sized row per vocabulary entry;
        # the row at `padding_idx` is reserved for padding tokens.
        self.embedder = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=emb_dim,
            padding_idx=padding_idx)
        # Sequence-to-vector LSTM encoder; collapses the per-token embeddings
        # into a single fixed-size representation of the whole text.
        self.lstm_encoder = nlp.seq2vec.LSTMEncoder(
            emb_dim,
            lstm_hidden_size,
            num_layers=lstm_layers,
            direction=direction,
            dropout=dropout_rate,
            pooling_type=pooling_type)
        # Hidden fully-connected layer sized from the encoder's reported output dim.
        self.fc = nn.Linear(self.lstm_encoder.get_output_dim(), fc_hidden_size)
        # Output projection: one logit per class.
        self.output_layer = nn.Linear(fc_hidden_size, num_classes)

    def forward(self, text, seq_len):
        """Map token-id tensor `text` (with per-sample lengths `seq_len`) to class logits."""
        # Look up the embedding vector for every token id.
        embeddings = self.embedder(text)
        # Encode the variable-length sequences into fixed-size text vectors.
        encoded_text = self.lstm_encoder(embeddings, sequence_length=seq_len)
        # Hidden layer with tanh non-linearity, then project to class logits.
        hidden = paddle.tanh(self.fc(encoded_text))
        return self.output_layer(hidden)