from data import train_data_loader
from data import tgt_vocab, src_vocab
from modules.encoder import BiLstmEncoder
from modules.decoder import LstmDecoder
from torch import nn
from configs.model_config import params

# Model-size hyperparameters (embedding/hidden sizes, layers, dropout)
# pulled from the project config; presumably 'sp' = "seq2seq params" — TODO confirm.
sp = params['sp']


class Seq2seq(nn.Module):
    """Encoder-decoder sequence-to-sequence model.

    A bidirectional LSTM encoder reads the source batch; an LSTM decoder,
    initialized from the encoder's final hidden/cell states and attending
    over its outputs, produces a distribution over target tokens.
    """

    def __init__(self):
        super().__init__()
        # Encoder and decoder share the same size hyperparameters from the config.
        shared_sizes = dict(
            embedding_size=sp['embedding_size'],
            hidden_size=sp['hidden_size'],
            num_layers=sp['num_layers'],
            dropout=sp['dropout'],
        )
        self.encoder = BiLstmEncoder(vocab=src_vocab, **shared_sizes)
        self.decoder = LstmDecoder(vocab=tgt_vocab, **shared_sizes)

    def forward(self, src_batch, src_lengths, tgt_batch):
        """Run one teacher-forced forward pass and return the decoder's output distribution.

        src_batch/src_lengths: padded source sequences and their true lengths.
        tgt_batch: target sequences; the final token (EOS) is never fed to the decoder.
        """
        # Drop the last target token so EOS is not used as decoder input.
        decoder_inputs = tgt_batch[:, :-1]
        enc_outputs, enc_hidden, enc_cell = self.encoder(src_batch, src_lengths)
        return self.decoder(
            src_batch=src_batch,
            src_lengths=src_lengths,
            tgt_batch=decoder_inputs,
            final_encoder_hidden_state=enc_hidden,
            final_encoder_cell_state=enc_cell,
            encoder_outputs=enc_outputs,
        )
