"""
编码器
"""
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

import config


class Encoder(nn.Module):
    """GRU encoder for a seq2seq model.

    Embeds a padded batch of token ids, packs it so the GRU skips padded
    positions, runs the GRU, and unpacks the result back to a padded tensor.
    """

    def __init__(self):
        super(Encoder, self).__init__()
        # padding_idx marks PAD positions so their embedding stays zero and
        # receives no gradient during backprop, reducing computation.
        self.embedding = nn.Embedding(num_embeddings=len(config.num_seq),
                                      embedding_dim=config.embedding_dim,
                                      padding_idx=config.num_seq.PAD)
        self.gru = nn.GRU(input_size=config.embedding_dim, hidden_size=config.hidden_size,
                          num_layers=config.num_layers, batch_first=True)

    def forward(self, input, input_length):
        """Encode one padded batch.

        :param input: LongTensor [batch_size, max_len] of token ids (PAD-padded)
        :param input_length: true (unpadded) length of each sequence in the batch
        :return: tuple (out, out_length, hidden) where
                 out: [batch_size, max_len, hidden_size] padded GRU outputs,
                 out_length: per-sequence lengths recovered by unpacking,
                 hidden: [num_layers * 1, batch_size, hidden_size] final hidden state
        """
        # embedded: [batch_size, max_len, embedding_dim]
        embedded = self.embedding(input)

        # Pack so the GRU does no work on padded positions.
        # enforce_sorted=False lets PyTorch sort the batch by length internally
        # and restore the original order on unpacking (including the hidden
        # state), so callers no longer need to pre-sort by descending length.
        # Behavior for already-sorted batches is unchanged.
        packed = pack_padded_sequence(embedded, input_length, batch_first=True,
                                      enforce_sorted=False)

        # packed_out is a PackedSequence (not a padded tensor)
        # hidden: [num_layers * 1, batch_size, hidden_size]
        packed_out, hidden = self.gru(packed)

        # Unpack back to a padded tensor, filling padded positions with PAD.
        # out: [batch_size, max_len, hidden_size]
        out, out_length = pad_packed_sequence(packed_out, batch_first=True,
                                              padding_value=config.num_seq.PAD)

        return out, out_length, hidden


if __name__ == '__main__':
    from dataset import get_loader_data

    # Smoke test: run a single batch through the encoder and inspect shapes.
    encoder = Encoder()

    for batch_input, _target, batch_input_length, _target_length in get_loader_data():
        encoded, encoded_length, final_hidden = encoder(batch_input, batch_input_length)

        print(encoded.size())
        print(encoded_length)
        print(final_hidden.size())
        # Only the first batch is needed for the shape check.
        break
