from config.Config import SEQ2SEQ_PARAM, BATCH_SIZE
from modules.encoders.bilstm import BiLstmEncoder
from modules.decoders.lstm import LstmDecoder
from utils.vocabulary import big_vocab
from torch import nn


class Seq2seq(nn.Module):
    """Sequence-to-sequence model: a bidirectional-LSTM encoder feeding an
    attention-equipped LSTM decoder, both sharing the same vocabulary and
    hyperparameters from ``SEQ2SEQ_PARAM``."""

    def __init__(self):
        super(Seq2seq, self).__init__()
        # Encoder and decoder are configured identically, so build the
        # keyword arguments once and reuse them for both modules.
        shared_cfg = dict(
            vocab=big_vocab,
            embedding_size=SEQ2SEQ_PARAM['embedding_size'],
            hidden_size=SEQ2SEQ_PARAM['hidden_size'],
            num_layers=SEQ2SEQ_PARAM['num_layers'],
            dropout=SEQ2SEQ_PARAM['dropout'],
        )
        self.encoder = BiLstmEncoder(**shared_cfg)
        self.decoder = LstmDecoder(**shared_cfg)

    def forward(self, src_batch, src_lengths, tgt_batch):
        """Run one teacher-forced encode/decode pass.

        Args:
            src_batch: source token batch fed to the encoder.
            src_lengths: per-example source lengths (for the encoder and
                the decoder's attention masking).
            tgt_batch: target token batch; the final column (EOS) is not
                fed to the decoder.

        Returns:
            Tuple ``(dist, attention)`` as produced by the decoder.
        """
        # Drop the last target token: EOS is predicted, never consumed.
        decoder_inputs = tgt_batch[:, :-1]
        enc_outputs, enc_hidden, enc_cell = self.encoder(src_batch, src_lengths)
        dist, attention = self.decoder(
            src_batch=src_batch,
            src_lengths=src_lengths,
            tgt_batch=decoder_inputs,
            final_encoder_hidden_state=enc_hidden,
            final_encoder_cell_state=enc_cell,
            encoder_outputs=enc_outputs,
        )
        return dist, attention
