# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to run beam search decoding"""

import tensorflow as tf
import numpy as np
from Chatbot_Retrieval_model.Dialogue_utterance_rewriter import data
from Chatbot_Retrieval_model.Dialogue_utterance_rewriter.config import Config

# Global decoding configuration; beam search below reads beam_size,
# max_dec_steps and min_dec_steps from it.
cf = Config()


class Hypothesis(object):
    """A single partial output sequence tracked during beam search.

    Holds the token ids produced so far, their log probabilities, the
    current decoder state, the per-step attention distributions, and the
    two coverage vectors (one over the encoder input, one over the query).
    """

    def __init__(self, tokens, log_probs, state, attn_dists, t_coverage, b_coverage):
        """Hypothesis constructor.

        Args:
            tokens: List of integers; ids of the tokens generated so far.
            log_probs: List of floats, same length as tokens; log probability of each token.
            state: Current decoder state, an LSTMStateTuple.
            attn_dists: List, same length as tokens, of numpy arrays with shape (attn_length); attention distributions so far.
            t_coverage: Current coverage vector over the encoder input (numpy array of shape (attn_length)).
            b_coverage: Current coverage vector over the query input (numpy array).
        """
        self.tokens = tokens
        self.log_probs = log_probs
        self.state = state
        self.attn_dists = attn_dists
        self.t_coverage = t_coverage
        self.b_coverage = b_coverage

    def extend(self, token, log_prob, state, attn_dist, t_coverage, b_coverage):
        """Return a NEW hypothesis extended by one beam-search step.

        Args:
            token: Integer; latest token produced by beam search.
            log_prob: Float; log probability of the latest token.
            state: New decoder state, an LSTMStateTuple.
            attn_dist: Attention distribution from the latest step (numpy array of shape (attn_length)).
            t_coverage: Updated encoder-side coverage vector.
            b_coverage: Updated query-side coverage vector.
        Returns:
            A new Hypothesis for the next step; self is left unmodified.
        """
        extended_tokens = self.tokens + [token]
        extended_log_probs = self.log_probs + [log_prob]
        extended_attn_dists = self.attn_dists + [attn_dist]
        return Hypothesis(extended_tokens, extended_log_probs, state,
                          extended_attn_dists, t_coverage, b_coverage)

    @property
    def latest_token(self):
        """The token produced at the most recent step."""
        return self.tokens[-1]

    @property
    def log_prob(self):
        """Total log probability: the sum of the per-token log probabilities."""
        return sum(self.log_probs)

    @property
    def avg_log_prob(self):
        """Length-normalized log probability.

        Normalizing by the number of tokens avoids always preferring
        shorter sequences.
        """
        return self.log_prob / len(self.tokens)


def run_beam_search(sess, model, vocab, batch):
    """Performs beam search decoding on the given example.

    Args:
        sess: a tf.Session
        model: a seq2seq model
        vocab: Vocabulary object
        batch: Batch object that is the same example repeated across the batch

    Returns:
        best_hyp: Hypothesis object; the best hypothesis found by beam search.
    """
    # Run the encoder to get the encoder hidden states and decoder initial state.
    # dec_in_state is a LSTMStateTuple.
    # enc_states has shape [batch_size, <=max_enc_steps, 2*hidden_dim].
    enc_states, query_states, dec_in_state = model.run_encoder(sess, batch)

    # Hoist loop-invariant vocab lookups out of the decode loop.
    vocab_size = vocab.size()
    unk_id = vocab.word2id(data.MARK_UNK)
    eos_id = vocab.word2id(data.MARK_EOS)

    # Initialize beam_size-many hypotheses, all starting with the GO token.
    hyps = [
        Hypothesis(
            tokens=[vocab.word2id(data.MARK_GO)],
            log_probs=[0.0],
            state=dec_in_state,
            attn_dists=[],
            # zero coverage vectors, one entry per encoder / query position
            t_coverage=np.zeros([batch.enc_batch.shape[1]]),
            b_coverage=np.zeros([batch.query_batch.shape[1]])
            )
        for _ in range(cf.beam_size)
    ]
    # this will contain finished hypotheses (those that have emitted the EOS token)
    results = []

    steps = 0
    while steps < cf.max_dec_steps and len(results) < cf.beam_size:
        # Latest token produced by each hypothesis; change any in-article
        # temporary OOV ids to the UNK id so that we can look up word embeddings.
        latest_tokens = [
            t if 0 <= t < vocab_size else unk_id
            for t in (h.latest_token for h in hyps)
        ]
        # list of current decoder states of the hypotheses
        states = [h.state for h in hyps]
        # lists of coverage vectors
        prev_t_coverage = [h.t_coverage for h in hyps]
        prev_b_coverage = [h.b_coverage for h in hyps]

        # Run one step of the decoder to get the new info
        (topk_ids, topk_log_probs, new_states, attn_dists,
         new_t_coverage, new_b_coverage) = model.decode_onestep(
             sess=sess,
             batch=batch,
             latest_tokens=latest_tokens,
             enc_states=enc_states,
             query_states=query_states,
             dec_init_states=states,
             prev_t_coverage=prev_t_coverage,
             prev_b_coverage=prev_b_coverage)

        # Extend each hypothesis and collect them all in all_hyps.
        # On the first step all hypotheses are identical, so only the first
        # one is expanded; on subsequent steps they are all distinct.
        all_hyps = []
        num_orig_hyps = 1 if steps == 0 else len(hyps)
        for i in range(num_orig_hyps):
            h = hyps[i]
            # for each of the top 2*beam_size candidate tokens:
            for j in range(cf.beam_size * 2):
                # Extend the ith hypothesis with the jth option
                all_hyps.append(h.extend(
                    token=topk_ids[i, j],
                    log_prob=topk_log_probs[i, j],
                    state=new_states[i],
                    attn_dist=attn_dists[i],
                    t_coverage=new_t_coverage[i],
                    b_coverage=new_b_coverage[i]))

        # Filter and collect any hypotheses that have produced the end token.
        hyps = []  # will contain hypotheses for the next step
        for h in sort_hyps(all_hyps):  # in order of most likely h
            if h.latest_token == eos_id:
                # If this hypothesis is sufficiently long, put in results. Otherwise discard.
                if steps >= cf.min_dec_steps:
                    results.append(h)
            else:  # hasn't reached stop token, so continue to extend this hypothesis
                hyps.append(h)
            if len(hyps) == cf.beam_size or len(results) == cf.beam_size:
                # Once we've collected beam_size-many hypotheses for the next step,
                # or beam_size-many complete hypotheses, stop.
                break

        steps += 1

    # At this point, either we've got beam_size results, or we've reached
    # maximum decoder steps. If no hypothesis finished, fall back to the
    # current (incomplete) hypotheses.
    if not results:
        results = hyps

    # Return the hypothesis with the highest average log probability.
    return sort_hyps(results)[0]


def sort_hyps(hyps):
    """Return the hypotheses sorted by descending average log probability."""
    return sorted(hyps, key=lambda hyp: -hyp.avg_log_prob)
