from config.Config import RNN_PARAM as param, BATCH_SIZE
from modules.bi_lstm_encoder import BiLstmEncoder
from modules.tf_bi_lstm_decoder import TeacherForcingLstmDecoder
from utils.vocabulary import big_vocab
from torch import nn


class BiLstm(nn.Module):
    """Seq2seq model: bidirectional LSTM encoder + teacher-forcing LSTM decoder.

    Deliberately tiny configuration (10-dim embeddings, 16-dim hidden state,
    2 layers) — presumably a debug/smoke-test sibling of the full-size
    ``BiLstmM`` below; TODO confirm intended use.
    """

    def __init__(self):
        # BUG FIX: this class previously did not inherit from nn.Module and
        # never called super().__init__(), so the encoder/decoder sub-modules
        # were not registered — parameters(), .to(device), .train()/.eval()
        # and state_dict() would all miss (or fail on) them.
        super(BiLstm, self).__init__()
        self.encoder = BiLstmEncoder(
            vocab=big_vocab,
            embedding_size=10,
            hidden_size=16,
            num_layers=2,
            dropout=0.1,
        )
        self.decoder = TeacherForcingLstmDecoder(
            vocab=big_vocab,
            embedding_size=10,
            hidden_size=16,
            num_layers=2,
            dropout=0.1,
        )

    def forward(self, src_batch, src_lengths, tgt_batch, tgt_lengths):
        """Encode the source batch, then decode with teacher forcing.

        Mirrors ``BiLstmM.forward`` so the two models are interchangeable.
        Returns the decoder's output distribution and attention weights.
        """
        encoder_outs, encoder_hidden, encoder_cell = self.encoder(src_batch, src_lengths)
        # Drop the leading target token before feeding the decoder; per the
        # original author it is an <eos>-style marker with no decoding value.
        dist, attention = self.decoder(src_batch=src_batch,
                                       src_lengths=src_lengths,
                                       tgt_batch=tgt_batch[:, 1:],
                                       tgt_lengths=tgt_lengths,
                                       final_encoder_hidden_state=encoder_hidden,
                                       final_encoder_cell_state=encoder_cell,
                                       encoder_outputs=encoder_outs)
        return dist, attention


class BiLstmM(nn.Module):
    """Full-size seq2seq network over ``big_vocab``.

    Pairs a bidirectional LSTM encoder with a teacher-forcing LSTM decoder,
    both built from one shared hyper-parameter set (128-dim embeddings,
    128-dim hidden state, 2 layers, dropout 0.3).
    """

    def __init__(self):
        super(BiLstmM, self).__init__()
        # Encoder and decoder intentionally share the same configuration.
        hparams = dict(
            vocab=big_vocab,
            embedding_size=128,
            hidden_size=128,
            num_layers=2,
            dropout=0.3,
        )
        self.encoder = BiLstmEncoder(**hparams)
        self.decoder = TeacherForcingLstmDecoder(**hparams)

    def forward(self, src_batch, src_lengths, tgt_batch, tgt_lengths):
        """Encode the source batch, then decode with teacher forcing.

        Returns the decoder's output distribution and attention weights.
        """
        enc_outputs, enc_hidden, enc_cell = self.encoder(src_batch, src_lengths)
        # Drop the leading target token before decoding; per the original
        # author it is an <eos>-style marker that carries no useful signal.
        shifted_targets = tgt_batch[:, 1:]
        dist, attention = self.decoder(
            src_batch=src_batch,
            src_lengths=src_lengths,
            tgt_batch=shifted_targets,
            tgt_lengths=tgt_lengths,
            final_encoder_hidden_state=enc_hidden,
            final_encoder_cell_state=enc_cell,
            encoder_outputs=enc_outputs,
        )
        return dist, attention
