from config import Config
from utils.beam import BeamSearch, tile


class BeamSearchWrapper:
    """Beam-search decoding driver around a recurrent decoder.

    Tiles the encoder's final states and outputs ``beam_width`` times, then
    steps the decoder one token at a time, feeding its log-probabilities into
    an OpenNMT-style ``BeamSearch`` object until every beam finishes or
    ``max_iter`` steps elapse.
    """

    def __init__(self, decoder, beam_width=4, n_best=1, max_iter=30, length_penalty=0.4, coverage_penalty=0.0):
        """
        Args:
            decoder: decoder module exposing ``vocab``, ``vocab_size``,
                ``embedding``, ``generate_init_cell_state`` and ``one_step``.
            beam_width: number of hypotheses kept alive per batch element.
            n_best: number of finished hypotheses tracked per batch element.
            max_iter: maximum number of decoding steps.
            length_penalty: alpha passed to the beam's length penalty.
            coverage_penalty: value passed to the beam's ``stepwise_penalty``.
        """
        self.decoder = decoder
        self.vocab_size = decoder.vocab_size
        self.pad_id = decoder.vocab[Config.PAD_TOKEN]
        self.bos_id = decoder.vocab[Config.BOS_TOKEN]
        self.eos_id = decoder.vocab[Config.EOS_TOKEN]
        self.beam_width = beam_width
        self.n_best = n_best
        self.max_iter = max_iter
        self.length_penalty = length_penalty
        # BUG FIX: this was hard-coded to ``False``, silently discarding the
        # constructor argument. The default of 0.0 is still falsy, so callers
        # using the default see identical behavior.
        self.coverage_penalty = coverage_penalty
        # Built fresh for each batch in initialize().
        self.beam = None

    def initialize(self, encoder_final_hidden, encoder_final_cell, encoder_outputs, src_lengths):
        """Create a fresh ``BeamSearch`` and tile encoder tensors per beam.

        Returns:
            Tuple of (initial decoder state, tiled encoder outputs,
            tiled source lengths), each repeated ``beam_width`` times
            along the batch dimension.
        """
        self.beam = BeamSearch(
            beam_size=self.beam_width,
            # NOTE(review): assumes every batch has exactly Config.BATCH_SIZE
            # elements; a short final batch would break this. Consider
            # deriving it from ``src_lengths`` instead — TODO confirm.
            batch_size=Config.BATCH_SIZE,
            bos=self.bos_id,
            eos=self.eos_id,
            n_best=self.n_best,
            device=Config.DEVICE,
            max_length=self.max_iter,
            return_attention=True,
            memory_lengths=src_lengths,
            stepwise_penalty=self.coverage_penalty,
            length_penalty=self.length_penalty,
            ratio=0.)

        def tile_(x, dim=0):
            # Repeat each batch element beam_width times along `dim`.
            return tile(x, self.beam_width, dim)

        # Final RNN states are tiled along dim 1 (their batch axis);
        # encoder outputs and lengths are tiled along dim 0 — presumably
        # states are (layers, batch, hidden) and outputs are batch-first.
        encoder_final_hidden = tile_(encoder_final_hidden, dim=1)
        encoder_final_cell = tile_(encoder_final_cell, dim=1)
        encoder_outputs = tile_(encoder_outputs, dim=0)
        src_lengths = tile_(src_lengths)

        init_state = self.decoder.generate_init_cell_state(encoder_final_hidden, encoder_final_cell)
        init_state.context = tile_(init_state.context)
        return init_state, encoder_outputs, src_lengths

    def decode(self, final_encoder_hidden, final_encoder_cell, encoder_outputs=None, src_batch=None, src_lengths=None):
        """Run beam search from the given encoder state.

        Returns:
            Tuple of (predictions, scores, attention) where ``predictions``
            keeps only the single best hypothesis per batch element.
        """
        prev_state, encoder_outputs, src_lengths = self.initialize(
            final_encoder_hidden, final_encoder_cell, encoder_outputs, src_lengths)

        for _ in range(self.max_iter):
            embed = self.decoder.embedding(self.beam.current_predictions)
            embed = embed.squeeze(1)
            state, vocab_dist = self.decoder.one_step(
                embed, prev_state, encoder_outputs, src_batch, src_lengths)
            # BeamSearch.advance expects log-probabilities.
            log_probs = vocab_dist.log()
            self.beam.advance(log_probs, state.alignments)

            if self.beam.is_finished.any():
                self.beam.update_finished()
                if self.beam.done:
                    break
                # update_finished() may drop whole batch entries, so reindex
                # the tiled encoder tensors to the surviving beam rows.
                encoder_outputs = encoder_outputs.index_select(0, self.beam.current_origin)
                src_lengths = src_lengths.index_select(0, self.beam.current_origin)
            # Reorder the decoder state to follow each beam's chosen parent.
            prev_state = state.batch_select(self.beam.current_origin)

        # Keep only the top (index 0) hypothesis per batch element.
        predictions = [each[0] for each in self.beam.predictions]
        return predictions, self.beam.scores, self.beam.attention
