from torch import nn
import torch
from vocab import Vocab
from configs.model_config import device


class BiLstmEncoder(nn.Module):
    """Bidirectional-LSTM encoder for a seq2seq model.

    Args:
        vocab: vocabulary object providing ``len(vocab)`` and ``pad_id``.
        embedding_size: dimensionality of the token embeddings.
        hidden_size: number of hidden units per LSTM direction.
        num_layers: number of stacked LSTM layers.
        dropout: inter-layer dropout probability (as with ``nn.LSTM``,
            only effective when ``num_layers > 1``).

    Inputs: src_batch, src_lengths
        src_batch: [batch_size, max_src_len] integer token ids.
        src_lengths: [batch_size] true (unpadded) sequence lengths.

    Outputs: outputs, final_hidden_state, final_cell_state
        outputs: [batch_size, max_src_len, 2 * hidden_size]
        final_hidden_state: [num_layers, batch_size, hidden_size]
        final_cell_state: [num_layers, batch_size, hidden_size]
    """

    def __init__(self,
                 vocab: Vocab,
                 embedding_size,
                 hidden_size,
                 num_layers,
                 dropout):
        super(BiLstmEncoder, self).__init__()
        self.vocab = vocab
        self.embedding = nn.Embedding(len(vocab), embedding_size, padding_idx=vocab.pad_id)
        # FIX: `dropout` was previously accepted but never passed on, so the
        # parameter was silently ignored. Forward it to nn.LSTM; it only
        # applies between stacked layers, so use 0.0 for a single layer to
        # avoid PyTorch's "dropout expects num_layers > 1" warning.
        self.lstm = nn.LSTM(
            input_size=embedding_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0.0,
            bidirectional=True)

        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.directions = 2  # bidirectional => 2 directions per layer

    def forward(self, src_batch, src_lengths):
        embeddings = self.embedding(src_batch)  # [batch_size, max_src_len, embedding_size]
        # FIX: pack_padded_sequence requires lengths on the CPU; accept a
        # lengths tensor living on any device (lists pass through unchanged).
        if torch.is_tensor(src_lengths):
            src_lengths = src_lengths.cpu()
        # Pack so the LSTM skips pad positions (speed + correct final states).
        x = nn.utils.rnn.pack_padded_sequence(input=embeddings,
                                              lengths=src_lengths,
                                              batch_first=True,
                                              enforce_sorted=False)
        outputs, (final_hidden_state, final_cell_state) = self.lstm(x)
        # outputs (once unpacked): [batch_size, max_src_batch_length, 2 * hidden_size]
        # final_hidden_state / final_cell_state: [2 * num_layers, batch_size, hidden_size],
        # laid out as (layer0-fwd, layer0-bwd, layer1-fwd, layer1-bwd, ...).

        # The encoder is bidirectional but the decoder is unidirectional, so
        # keep one direction per layer; this design takes the backward
        # direction (odd indices 1, 3, 5, ...).
        # FIX: build the index on the states' own device instead of a global
        # config `device`, so the module works wherever it is moved with .to().
        final_idx = (torch.arange(self.num_layers, device=final_hidden_state.device)
                     * self.directions + 1)
        final_hidden_state = torch.index_select(final_hidden_state, 0, final_idx)
        final_cell_state = torch.index_select(final_cell_state, 0, final_idx)

        # Re-pad so every sequence in the batch has the same length again.
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
        return outputs, final_hidden_state, final_cell_state
