# -*- coding: utf-8 -*-
# @Time : 2021-11-17 19:06
# @Author : lwb
# @Site : 
# @File : LSTM.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence


class LSTM(nn.Module):
    """Sequence classifier: embedding -> single-layer LSTM -> linear -> log-softmax.

    Args:
        vocab_size: size of the token vocabulary.
        embedding_dim: dimensionality of the token embeddings.
        hidden_dim: dimensionality of the LSTM hidden state.
        num_class: number of output classes.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_class):
        super(LSTM, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # batch_first=True: input/output tensors are laid out (batch, seq, feature).
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        # NOTE(review): never used in forward(); kept only so the attribute
        # remains available to any external code that references it.
        self.activate = F.relu
        self.output = nn.Linear(hidden_dim, num_class)

    def forward(self, inputs, lengths):
        """Classify a batch of padded token-id sequences.

        Args:
            inputs: LongTensor of shape (batch, max_seq_len), padded token ids.
            lengths: true (unpadded) length of each sequence; 1-D tensor or list.

        Returns:
            Tensor of shape (batch, num_class) holding log-probabilities.
        """
        embeddings = self.embedding(inputs)
        # pack_padded_sequence requires lengths on CPU (PyTorch >= 1.7);
        # guard against callers passing a CUDA tensor.
        if torch.is_tensor(lengths):
            lengths = lengths.cpu()
        # Pack the padded batch so the LSTM skips padding positions.
        x_pack = pack_padded_sequence(
            embeddings, lengths, batch_first=True, enforce_sorted=False
        )
        _, (hn, cn) = self.lstm(x_pack)
        # hn[-1] is the final hidden state of the last (here: only) LSTM layer,
        # one vector per batch element, regardless of each sequence's length.
        outputs = self.output(hn[-1])
        log_probs = F.log_softmax(outputs, dim=-1)
        return log_probs