import random

import torch
import torch.nn as nn


class EncoderRNN(nn.Module):
    """Bidirectional GRU encoder whose two directions are summed into one output."""

    def __init__(self, input_size, hidden_size, embedding_layer, n_layer=1, dropout=0):
        """
        :param input_size: vocabulary size of the input tokens
        :param hidden_size: hidden-state size; must equal the embedding dimension,
            since the embedding output is fed directly into the GRU
        :param embedding_layer: embedding module mapping token ids to vectors
        :param n_layer: number of stacked GRU layers
        :param dropout: inter-layer dropout probability (GRU only applies it
            between layers, so it is forced to 0 when n_layer == 1 to silence
            the PyTorch warning)
        """
        super(EncoderRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embedding_layer = embedding_layer

        # The GRU consumes embedded tokens, hence input_size=hidden_size.
        self.gru = nn.GRU(input_size=hidden_size,
                          hidden_size=hidden_size,
                          num_layers=n_layer,
                          dropout=(0 if n_layer == 1 else dropout),
                          bidirectional=True)

    def forward(self, input_seq, input_lengths, hidden=None):
        """
        Encode a padded batch of token-id sequences.

        :param input_seq: LongTensor of shape (max_length, batch_size)
        :param input_lengths: true length of each sequence, shape (batch_size,)
        :param hidden: optional initial hidden state,
            shape (2 * n_layer, batch_size, hidden_size)
        :return: (outputs, hidden) where outputs has shape
            (max(input_lengths), batch_size, hidden_size) — the forward and
            backward direction outputs summed — and hidden has shape
            (2 * n_layer, batch_size, hidden_size)
        """
        embedded = self.embedding_layer(input_seq)
        # pack_padded_sequence requires the lengths on CPU.
        lengths = input_lengths.cpu() if torch.is_tensor(input_lengths) else input_lengths
        # enforce_sorted=False lets callers pass batches in any order; packing
        # sorts internally and the outputs are restored to the input order.
        packed = torch.nn.utils.rnn.pack_padded_sequence(
            embedded, lengths, enforce_sorted=False)
        outputs, hidden = self.gru(packed, hidden)
        # Unpack back to a padded tensor (padded to max(input_lengths)).
        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
        # Sum the forward and backward halves of the bidirectional output.
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
        return outputs, hidden


if __name__ == '__main__':
    # Smoke test: run a random batch through the encoder and inspect shapes.
    max_len, batch_size = 20, 256
    sample = torch.tensor(
        [[random.randint(1, 999) for _ in range(batch_size)] for _ in range(max_len)]
    )
    sample_lengths = torch.full((batch_size,), 10, dtype=torch.int64)
    print(sample.shape)
    print(sample_lengths.shape)
    encoder = EncoderRNN(
        input_size=1000,
        hidden_size=500,
        embedding_layer=torch.nn.Embedding(1000, 500),
        n_layer=2,
    )
    outputs, state = encoder(sample, sample_lengths)
    print(outputs.shape)
    print(state.shape)
