from modules.encoders.base_encoder import BaseEncoder
import torch


class NoneEncoder(BaseEncoder):
    """Pass-through encoder: the memory bank is simply the token embeddings.

    No recurrence or self-attention is applied; this encoder exists so a
    decoder can be driven directly off embeddings.

    Args:
        embedding : embedding layer; its ``embedding_dim`` is exposed as
            ``hidden_size`` so downstream components can size themselves.

    Input:
        src : [batch, max_src_len]
        lengths : [batch,]

    Output:
        final_state : None — this encoder keeps no state.
            (The previous docstring claimed
            ``2 * [num_layers, batch_size, embedding_size]``, which the
            code never produced.)
        memory_bank : [batch_size, max_src_len, embedding_size]
        lengths : [batch,]
    """

    @classmethod
    def from_opt(cls, opt, embedding=None):
        """Alternate constructor from an options object.

        ``opt`` is accepted for interface parity with other encoders but is
        unused: this encoder has no hyper-parameters beyond the embedding.

        Fix: previously this returned None (bare ``pass``), so the factory
        path could never build an encoder.
        """
        return cls(embedding)

    def __init__(self, embedding):
        super(NoneEncoder, self).__init__()
        self.embedding = embedding
        # Expose the embedding width as hidden_size so callers can treat
        # this like any other encoder when sizing the decoder/bridge.
        self.hidden_size = self.embedding.embedding_dim

    def forward(self, src, lengths=None):
        """Embed ``src`` and return the embeddings unchanged.

        Args:
            src : LongTensor [batch, max_src_len]
            lengths : optional [batch,], passed through untouched.

        Returns:
            (None, memory_bank, lengths) — no encoder state exists.
        """
        # The original unpacked emb.shape into unused locals; removed.
        memory_bank = self.embedding(src)
        return None, memory_bank, lengths


if __name__ == "__main__":
    # Module is import-only; nothing to run standalone.
    pass
