import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack

from mec.utils.rnn import rnn_factory


class RNNEncoder(nn.Module):
    """Encode a padded batch of token ids with a (bi)directional RNN.

    Args:
        rnn_type: one of "GRU", "LSTM", "RNN" (forwarded to ``rnn_factory``).
        bidirectional (bool): whether the RNN runs in both directions.
        num_layers (int): number of stacked RNN layers.
        hidden_size (int): hidden state size per direction.
        dropout (float): inter-layer dropout probability (torch applies it
            only when ``num_layers > 1``).
        embedding: embedding module exposing ``embedding_dim`` (e.g.
            ``nn.Embedding``) used to embed ``src``.

    Inputs (forward):
        src: [batch, max_src_len] tensor of token ids.
        lengths: optional [batch] tensor of true sequence lengths; when
            given, the batch is packed so padded positions are skipped.

    Outputs (forward):
        final_state:
            LSTM: tuple ``(h_n, c_n)``, each
                [num_layers * num_directions, batch, hidden_size]
            GRU/RNN: [num_layers * num_directions, batch, hidden_size]
        memory_bank: [batch, max_src_len, num_directions * hidden_size]
        lengths: the ``lengths`` argument, passed through unchanged.
    """

    def __init__(self,
                 rnn_type,
                 bidirectional,
                 num_layers,
                 hidden_size,
                 dropout,
                 embedding):
        super().__init__()

        num_directions = 2 if bidirectional else 1

        self.num_layers = num_layers
        self.directions = num_directions
        # Effective feature size of memory_bank: both directions are
        # concatenated, so it is num_directions * hidden_size.
        self.hidden_size = hidden_size
        self.memory_size = num_directions * hidden_size
        self.embedding = embedding

        self.rnn = rnn_factory(rnn_type,
                               input_size=embedding.embedding_dim,
                               hidden_size=hidden_size,
                               num_layers=num_layers,
                               dropout=dropout,
                               batch_first=True,
                               bidirectional=bidirectional)

    def forward(self, src, lengths=None):
        """Run the RNN over ``src``; see the class docstring for shapes."""
        emb = self.embedding(src)
        if lengths is not None:
            # pack_padded_sequence requires lengths on the CPU regardless of
            # where the model lives; enforce_sorted=False lets the batch stay
            # in its original order.
            packed_emb = pack(input=emb,
                              lengths=lengths.to('cpu'),
                              batch_first=True,
                              enforce_sorted=False)
        else:
            packed_emb = emb

        memory_bank, final_state = self.rnn(packed_emb)

        if lengths is not None:
            # Restore [batch, max_src_len, num_directions * hidden_size].
            memory_bank, _ = unpack(memory_bank, batch_first=True)
        return final_state, memory_bank, lengths

    @classmethod
    def from_opt(cls, opt, embedding=None):
        """Build an encoder from an options namespace.

        Raises:
            NotImplementedError: always — previously this stub silently
            returned ``None``, which would surface as an obscure
            ``AttributeError`` at the call site.
        """
        raise NotImplementedError("RNNEncoder.from_opt is not implemented")
