from modules.encoders.base_encoder import BaseEncoder
import torch
from utils.tensor import sequence_mask


class MeanEncoder(BaseEncoder):
    """Trivial encoder that summarizes a source sequence by its mean embedding.

    The per-token embeddings serve as the memory bank, and their
    (length-masked) mean — repeated ``num_layers`` times — serves as the
    initial decoder state, so the shapes line up with an RNN decoder.

    Args:
        num_layers (int): number of encoder layers, to match the decoder.
        embedding (nn.Embedding): embedding layer; its ``embedding_dim``
            defines the encoder's hidden size.

    Input:
        src : [batch, max_src_len]
        lengths : [batch,] or None

    Output:
        final_state : 2-tuple of [num_layers, batch_size, embedding_size]
        memory_bank : [batch_size, max_src_len, embedding_size]
        lengths : [batch,] (passed through unchanged)
    """

    @classmethod
    def from_opt(cls, opt, embedding=None):
        # NOTE(review): factory stub — the `opt` schema is not visible from
        # this file, so this is left unimplemented (returns None) as before.
        pass

    def __init__(self, num_layers, embedding):
        super(MeanEncoder, self).__init__()
        self.num_layers = num_layers
        self.embedding = embedding
        # Hidden size is tied to the embedding width so the returned state
        # matches the decoder's expected dimensionality.
        self.hidden_size = self.embedding.embedding_dim

    def forward(self, src, lengths=None):
        emb = self.embedding(src)  # [batch, max_src_len, embed_dim]
        batch_size, _, embed_dim = emb.shape

        if lengths is not None:
            # Masked mean: zero out padding positions, then divide each row
            # by its true length so padded tokens don't dilute the average.
            # NOTE(review): assumes sequence_mask(lengths) has width
            # src.size(1) (i.e. max(lengths) == max_src_len) — TODO confirm
            # for batches where every sequence is shorter than the padding.
            mask = sequence_mask(lengths).float()
            # clamp(min=1) avoids a NaN mean for empty (length-0) sequences;
            # their mask row is all zeros, so the result is simply zeros.
            mask = mask / lengths.clamp(min=1).unsqueeze(1).float()
            # [batch, 1, len] x [batch, len, dim] -> [batch, 1, dim] -> [batch, dim]
            mean = torch.bmm(mask.unsqueeze(1), emb).squeeze(1)
        else:
            mean = emb.mean(1)

        # expand() returns a non-contiguous view; make it contiguous because
        # cuDNN RNNs reject non-contiguous initial hidden states.
        mean = mean.expand(self.num_layers, batch_size, embed_dim).contiguous()
        memory_bank = emb
        final_state = (mean, mean)  # (h, c) pair for an LSTM-style decoder
        return final_state, memory_bank, lengths


if __name__ == '__main__':
    # Smoke test: vocab 12, embedding dim 7, padding_idx 0.
    t_embedding = torch.nn.Embedding(12, 7, 0)
    t_src = torch.tensor([
        [1, 2, 3, 4, 5],
        [2, 3, 4, 5, 0],
        [3, 4, 5, 0, 0],
        [4, 5, 0, 0, 0]
    ])
    t_src_len = torch.tensor([5, 4, 3, 2])
    t_mean_encoder = MeanEncoder(
        2,
        embedding=t_embedding
    )

    # final_state is a tuple of tensors, so unpack the outputs before asking
    # for shapes — the original iterated the raw 3-tuple and crashed with
    # AttributeError on the state tuple's missing `.shape`.
    t_state, t_bank, t_lens = t_mean_encoder(t_src, t_src_len)
    print([s.shape for s in t_state], t_bank.shape, t_lens.shape)

    # Without lengths the returned lengths entry is None.
    t_state, t_bank, t_lens = t_mean_encoder(t_src)
    print([s.shape for s in t_state], t_bank.shape, t_lens)
