# models: BeamSearch
import numpy
import copy


class BeamSearch(object):
    """Decode output sequences from an encoder-decoder model.

    Two modes:
      * stochastic sampling (``stochastic=True``, requires ``beam_size == 1``):
        follow the decoder's own next-word choice step by step, stopping at
        EOS (token id 0) or after ``maxlen`` steps.
      * beam search (``stochastic=False``): keep the ``beam_size`` best
        partial hypotheses, ranked by cumulative negative log-probability.
    """

    def __init__(self, enc_dec, configuration, beam_size=1, maxlen=50, stochastic=True, path=None):
        """
        :param enc_dec: model exposing ``compile_init_and_context(input)``
            and ``compile_next_state_and_probs(next_w, ctx, next_state)``.
        :param configuration: unused here; kept for interface compatibility.
        :param beam_size: number of live hypotheses (beam search only).
        :param maxlen: maximum length of a generated sequence.
        :param stochastic: True for sampling, False for beam search.
        :param path: unused here; kept for interface compatibility.
        :raises ValueError: if ``beam_size > 1`` while ``stochastic=True``.
        """
        self.enc_dec = enc_dec
        # if sampling, beam_size must be 1
        self.beam_size = beam_size
        # max length of output sentence
        self.maxlen = maxlen
        # stochastic == True stands for sampling
        self.stochastic = stochastic

        # A beam wider than 1 is meaningless when sampling a single path.
        # (Was an `assert`, which `python -O` silently strips.)
        if self.beam_size > 1 and self.stochastic:
            raise ValueError('Beam search does not support stochastic sampling')

    def apply(self, input):
        """Generate translation(s) for ``input``.

        Returns ``(sample, sample_score)``:
          * stochastic: one word-id list plus an accumulated probability.
            NOTE(review): this branch sums raw probabilities while the beam
            branch accumulates negative log-probabilities -- the two scores
            are not comparable; confirm whether that asymmetry is intended.
          * beam search: a list of word-id lists and a parallel list of
            negative log-probability costs (finished hypotheses first, then
            any still alive when ``maxlen`` is reached).
        """
        sample = []
        sample_score = []

        if self.stochastic:
            sample_score = 0

        live_k = 1   # hypotheses still being extended
        dead_k = 0   # hypotheses finished with EOS

        # NB: independent empty lists -- `[[]] * k` would alias one list.
        hyp_samples = [[] for _ in range(live_k)]
        hyp_scores = numpy.zeros(live_k).astype('float32')
        hyp_states = []

        # get initial state of decoder rnn and encoder context
        ret = self.enc_dec.compile_init_and_context(input)
        next_state, ctx0 = ret[0], ret[1]
        # -1 is the beginning-of-sequence indicator
        next_w = -1 * numpy.ones((1,)).astype('int64')

        for i in range(self.maxlen):
            # replicate the encoder context for every live hypothesis
            ctx = numpy.tile(ctx0, [live_k, 1])
            ret = self.enc_dec.compile_next_state_and_probs(next_w, ctx, next_state)
            next_p, next_state, next_w = ret[0], ret[1], ret[2]

            if self.stochastic:
                nw = next_w[0]
                sample.append(nw)
                sample_score += next_p[0, nw]
                # 0 for EOS
                if nw == 0:
                    break
            else:
                # cumulative cost of every (hypothesis, word) extension
                cand_scores = hyp_scores[:, None] - numpy.log(next_p)
                cand_flat = cand_scores.flatten()
                ranks_flat = cand_flat.argsort()[:self.beam_size - dead_k]

                voc_size = next_p.shape[1]
                # Integer division: `/` is true division under Python 3 and
                # would produce float indices, breaking the indexing below.
                trans_indices = ranks_flat // voc_size
                word_indices = ranks_flat % voc_size
                costs = cand_flat[ranks_flat]

                new_hyp_samples = []
                new_hyp_scores = numpy.zeros(self.beam_size - dead_k).astype('float32')
                new_hyp_states = []

                for idx, (ti, wi) in enumerate(zip(trans_indices, word_indices)):
                    new_hyp_samples.append(hyp_samples[ti] + [wi])
                    # assignment into the float32 array already copies the scalar
                    new_hyp_scores[idx] = costs[idx]
                    new_hyp_states.append(next_state[ti].copy())

                # separate finished hypotheses from those to keep extending
                new_live_k = 0
                hyp_samples = []
                hyp_scores = []
                hyp_states = []

                for idx in range(len(new_hyp_samples)):
                    if new_hyp_samples[idx][-1] == 0:
                        # ends with EOS: move to the finished pool
                        sample.append(new_hyp_samples[idx])
                        sample_score.append(new_hyp_scores[idx])
                        dead_k += 1
                    else:
                        new_live_k += 1
                        hyp_samples.append(new_hyp_samples[idx])
                        hyp_scores.append(new_hyp_scores[idx])
                        hyp_states.append(new_hyp_states[idx])

                hyp_scores = numpy.array(hyp_scores)
                live_k = new_live_k

                if live_k < 1 or dead_k >= self.beam_size:
                    break

                next_w = numpy.array([w[-1] for w in hyp_samples])
                next_state = numpy.array(hyp_states)

        if not self.stochastic:
            # length cut-off reached: flush the unfinished hypotheses too
            # (range(0) is empty, so no explicit live_k > 0 guard is needed)
            for idx in range(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])

        return sample, sample_score

