# encoding: utf-8


import torch
import torch.nn as nn


class Rnn(nn.Module):
    def __init__(self, input_dim, output_dim, num_layer):
        super(Rnn, self).__init__()
        self.lstm1 = nn.LSTM(input_dim, output_dim, num_layer)

    def forward(self, input):
        output, hidden = self.lstm1(input)  # LSTM的输出包括隐藏层计算结果以及隐藏层状态，因此=左边必须是两个值
        output, hidden = self.lstm1(output, hidden)  # LSTM的输出包括隐藏层计算结果以及隐藏层状态，因此=左边必须是两个值
        return output, hidden


if __name__ == "__main__":
    # Instantiate the model. input_dim must equal output_dim because the
    # LSTM is applied twice in Rnn.forward.
    model = Rnn(input_dim=10, output_dim=10, num_layer=2)
    # Build a random input batch. nn.LSTM's default layout is
    # (sequence_length, batch_size, feature_dim).
    # NOTE: renamed from `input` to avoid shadowing the builtin input().
    x = torch.randn(20, 15, 10)
    # Run the forward pass.
    output, hidden = model(x)
    # (sequence_length, batch_size, output_dim)
    print(output.shape)
    # hidden is (h_n, c_n); h_n has shape (num_layer, batch_size, output_dim)
    print(hidden[0].shape)
