from modules.encoders.bilstm import BiLstmEncoder
from modules.decoders.context import ConsumDecoder
from utils.vocabulary import big_vocab
from torch import nn
from config.Config import CONSUM_PARAM


class Consum(nn.Module):
    """Seq2seq model combining a method encoder and a context encoder with a
    shared decoder.

    Both source sequences (method tokens and context tokens) are encoded
    independently; the decoder attends over both encoder outputs and is
    initialised from both encoders' final hidden/cell states.
    """

    def __init__(self):
        super(Consum, self).__init__()
        # BUG FIX: the original left `self.encoders = []` and never created
        # `self.method_encoder` / `self.context_encoder`, so forward() raised
        # AttributeError on its first call. `BiLstmEncoder` was imported but
        # unused — it is clearly the intended encoder class.
        # NOTE(review): kwargs mirror the decoder's CONSUM_PARAM usage —
        # confirm against BiLstmEncoder's actual signature.
        self.method_encoder = BiLstmEncoder(
            vocab=big_vocab,
            embedding_size=CONSUM_PARAM['embedding_size'],
            hidden_size=CONSUM_PARAM['hidden_size'],
            num_layers=CONSUM_PARAM['num_layers'],
            dropout=CONSUM_PARAM['dropout'])
        self.context_encoder = BiLstmEncoder(
            vocab=big_vocab,
            embedding_size=CONSUM_PARAM['embedding_size'],
            hidden_size=CONSUM_PARAM['hidden_size'],
            num_layers=CONSUM_PARAM['num_layers'],
            dropout=CONSUM_PARAM['dropout'])
        # Kept for backward compatibility with any code reading `.encoders`;
        # parameter registration happens via the direct attributes above.
        self.encoders = [self.method_encoder, self.context_encoder]
        self.decoder = ConsumDecoder(
            vocab=big_vocab,
            embedding_size=CONSUM_PARAM['embedding_size'],
            method_encoder_hidden_size=CONSUM_PARAM['hidden_size'],
            context_encoder_hidden_size=CONSUM_PARAM['hidden_size'],
            num_layers=CONSUM_PARAM['num_layers'],
            dropout=CONSUM_PARAM['dropout'])

    def forward(self, m_src_batch, m_src_lengths, c_src_batch, c_src_lengths, tgt_batch):
        """Run both encoders and the decoder for one training step.

        Args:
            m_src_batch / m_src_lengths: method-source token batch and lengths.
            c_src_batch / c_src_lengths: context-source token batch and lengths.
            tgt_batch: target token batch; the final position is dropped so the
                decoder input is the target shifted for teacher forcing.

        Returns:
            (dist, m_attention, c_attention) as produced by the decoder.
        """
        # Drop the last target token: decoder consumes tgt[:-1] and is trained
        # to predict the next token (teacher forcing).
        tgt_batch = tgt_batch[:, :-1]
        m_encoder_outs, m_encoder_hidden, m_encoder_cell = self.method_encoder(m_src_batch, m_src_lengths)
        c_encoder_outs, c_encoder_hidden, c_encoder_cell = self.context_encoder(c_src_batch, c_src_lengths)
        dist, m_attention, c_attention = self.decoder(m_src_batch=m_src_batch,
                                                      m_src_lengths=m_src_lengths,
                                                      c_src_batch=c_src_batch,
                                                      c_src_lengths=c_src_lengths,
                                                      tgt_batch=tgt_batch,
                                                      final_m_encoder_hidden_state=m_encoder_hidden,
                                                      final_m_encoder_cell_state=m_encoder_cell,
                                                      final_c_encoder_hidden_state=c_encoder_hidden,
                                                      final_c_encoder_cell_state=c_encoder_cell,
                                                      m_encoder_outputs=m_encoder_outs,
                                                      c_encoder_outputs=c_encoder_outs)
        return dist, m_attention, c_attention
