'''
Created on Jun 1, 2010

@author: Guan Gui, Will Zhang
$LastChangedBy: wilzzha $
'''
import math
import sys

__version__ = "$Rev: 374 $"

class Decoder(object):
    '''
    This class implements a beam search stack decoder for SMT.
    '''

    def __init__(self, LM, RM, TM, src_sent, decoder_stack_threshold = 5,
                 translated_src_mask = None, last_translated_index = None,
                 partial_tgt_sent = None):
        '''
        Constructor
        @param LM: Language Model
        @type LM: L{LanguageModel}
        @param RM: Reordering Model
        @type RM: L{Reordering Model}
        @param TM: Translation Model
        @type TM: L{TranslationModel}
        @param src_sent: The source sentence to be translated
        @type src_sent: C{list} of C{str}
        @param decoder_stack_threshold: The alpha threshold value used in stack pruning
        @type decoder_stack_threshold: C{float}
        @param translated_src_mask: A boolean mask indicating which words in the source sentence
                                    has been translated.
        @type translated_src_mask: C{list} of C{bool}
        @param last_translated_index: The index of the source word in source sentence aligned with
                                        the last target word in the partially translated target.
        @type last_translated_index: C{int}
        @param partial_tgt_sent: The partial translation.
        @type partial_tgt_sent: C{list} of C{str}
        '''
        # initialize the decoder stacks
        self._decoder_stacks = [DecoderStack(decoder_stack_threshold) for _ in xrange(0, len(src_sent) + 1)]
        
        self._src_sent = src_sent
        self._partial_tgt_sent = partial_tgt_sent

        Hypothesis.translation_model = TM
        Hypothesis.reordering_model = RM
        Hypothesis.language_model = LM
        Hypothesis.source_sentence = src_sent

        # initialize the base hypothesis
        if partial_tgt_sent is None:
            # initialize an empty hypothesis
            self._decoder_stacks[0].push(Hypothesis(None))
        else:
            # Check the partial translation input validity
            assert translated_src_mask != None and last_translated_index != \
            None and partial_tgt_sent != None, "Must specify partial translation"
            assert len(translated_src_mask) == len(src_sent), "Mask and Source Sentence Length Mismatch"
            assert last_translated_index in range(len(src_sent)), "Last translated index is out of range"

            # initialize a partially solved hypothesis
            base_hypo = Hypothesis(None, last_translated_index, partial_tgt_sent)
            base_hypo.mask = translated_src_mask
            self._decoder_stacks[sum(translated_src_mask)].push(base_hypo)

    @property
    def decoder_stacks(self):
        return self._decoder_stacks

    @property
    def src_sent(self):
        return self._src_sent

    def decode(self):
        """
        Invoke this function to complete decoding.
        """
        # Step forward from Stack 0 to Stack I
        for i, stack in enumerate(self._decoder_stacks):
            # status feedback to stdout
            print ("%d words has been translated\r" % (i)),
            sys.stdout.flush()
            # For every hypothesis in the current stack
            for hypo in stack:
                # Spawn child hypotheses
                for new_hypo in hypo.spawn_new_hypotheses():
                    #print "In Stack %d, Try target %s" % (i, new_hypo._translation_target)
                    # and push these child hypotheses onto the right stack
                    #    determined by translation progress
                    self._decoder_stacks[i + 1].push(new_hypo)

    def print_ranked_results(self, num_results):
        """
        This function prints out a maximum of <num_results> best target results to stdout
        @param num_results: The maximum number of results to print out. 
        """
        decomposed_decoder_stack = self._decoder_stacks[-1].decompose()

        for ((ph, tt), ps) in decomposed_decoder_stack[:num_results]:
            print '"%s" with score %.4f' % (' '.join(ph.last_n_targets(len(self._src_sent) - 1) + [tt]), ps)

class ArcStack(list):
    """
    This class is used to hold the "arcs" or back traces from a child
    hypothesis to a list of parent hypotheses.
    The stack is kept sorted (best first) by the partial score derived from
    coming through the corresponding parent hypothesis.
    """

    def __init__(self):
        super(ArcStack, self).__init__()

    def push(self, arc):
        """
        Push an "arc" or back trace along with the partial score coming from the
        corresponding parent hypothesis onto the stack, keeping descending
        score order. Ties are placed after existing equal-scored arcs.

        Note: the original signature used tuple parameter unpacking
        (``def push(self, (parent, partial_score))``), which PEP 3113 removed
        from the language; unpacking inside the body is equivalent and
        forward-compatible.

        @param arc: a (parent hypothesis, partial score) tuple
        """
        parent, partial_score = arc
        # Insert before the first strictly worse-scoring arc.
        for i, (_, score) in enumerate(self):
            if partial_score > score:
                self.insert(i, (parent, partial_score))
                return
        # Worst score so far (or empty stack): goes at the end.
        self.append((parent, partial_score))

class DecoderStack(list):
    """
    The "stack" of stack decoding: a beam holding hypotheses of equal
    translation progress (number of translated source words), kept sorted
    from best to worst partial score.

    Pruning is threshold based rather than histogram based: any hypothesis
    whose log score trails the best one by more than log(threshold) is
    dropped from the tail of the beam.
    """

    def __init__(self, threshold):
        """
        Constructor
        @param threshold: the pruning threshold, i.e. the maximum tolerable
        ratio between the best and worst scoring hypothesis on this stack.
        """
        super(DecoderStack, self).__init__()
        # Stored in log space so pruning compares log-score differences.
        self._threshold = math.log(threshold)

    @property
    def threshold(self):
        # Expose the threshold back in probability-ratio space.
        return math.exp(self._threshold)

    @threshold.setter
    def threshold(self, threshold):
        self._threshold = math.log(threshold)

    def push(self, newhypo):
        """
        Push a new hypothesis onto the stack.
        Attempts hypothesis recombination first, then inserts in score order
        and prunes the tail of the beam.
        @param newhypo: The new hypothesis to be pushed onto the stack.
        """
        # Try to recombine the newcomer with a hypothesis already on the stack.
        candidate = newhypo
        for resident in self:
            if resident.recombine(newhypo):
                # Recombination may have changed the survivor's best arc
                # score, so take it out and re-insert it at its new rank.
                self.remove(resident)
                candidate = resident
                break

        # In-order insertion: place before the first worse-scoring hypothesis.
        slot = None
        for idx, resident in enumerate(self):
            if candidate > resident:
                slot = idx
                break
        if slot is None:
            self.append(candidate)
        else:
            self.insert(slot, candidate)

        # Threshold pruning: drop the tail while it trails the best hypothesis
        # by more than log(threshold).
        while self[0].partial_score - self[-1].partial_score > self._threshold:
            self.pop()

    def decompose(self):
        """
        Back trace all hypotheses in the stack to generate full target
        sentences and their scores, as ((parent, target word), score) arcs
        sorted by score.
        """
        arcs = ArcStack()
        for hypo in self:
            for parent, score in hypo.parent_arcs:
                arcs.push(((parent, hypo.translation_target), score))
        return arcs

class Hypothesis(object):
    """
    A (partial) translation hypothesis in the beam search.

    Each hypothesis records which source words have been covered (mask), the
    last translated source position and its target word, the accumulated log
    probability score, and back-pointer arcs to its parent hypotheses.

    The models and the source sentence are shared by all hypotheses and are
    therefore held as class attributes; Decoder.__init__ installs them before
    any hypothesis is created.
    """
    # Shared models/data, set once by the Decoder before decoding starts.
    language_model = None
    reordering_model = None
    translation_model = None
    source_sentence = None
    # Log-linear weights applied to the three model scores in spawn_new_hypotheses.
    lambda_LM = 1
    lambda_RM = 1
    lambda_TM = 1
    # Maximum allowed distance between consecutively translated source positions.
    reordering_limit_dist = 5

    def __init__(self,
                 parent,
                 translation_index = -1,
                 translation_target = None,
                 translation_score_penalty = None):
        """
        Constructor
        @param parent: The parent hypothesis this new hypothesis is extended
                        from, or None for a base (empty or resumed) hypothesis
        @param translation_index: The index of the source word that was translated
                                    in this hypothesis on top of its parent.
        @param translation_target: The target word that was translated in this
                                    hypothesis on top of its parent
        @param translation_score_penalty: The partial log probability score of the
                                    translation relative to the parent.
        """
        # Check model validity and source_sentence
        assert self.language_model != None and self.reordering_model != None\
         and self.translation_model != None and self.source_sentence != None\
         , "Those models or source sentence cannot be None!"

        if parent is None: #Create a Empty Hypothesis
            # list of bools. We mark translated word positions as True
            self._mask = [False for _ in self.source_sentence]
            self._partial_score = 0
            # base hypotheses have no back-pointers (see last_n_targets)
            self._parent_arcs = None
            self._translation_index = translation_index
            self._translation_target = translation_target
        else: # Create a new hypothesis based on a parent hypothesis
            # copy the parent's mask so the parent is not mutated
            self._mask = (parent._mask)[:]
            self._mask[translation_index] = True
            self._translation_index = translation_index
            self._translation_target = translation_target
            # a freshly spawned hypothesis carries exactly one parent arc;
            # more can be added later through recombination
            self._parent_arcs = ArcStack()
            self._partial_score = parent.partial_score + translation_score_penalty
            self._parent_arcs.push((parent, self._partial_score))

    @property
    def mask(self):
        # Boolean coverage mask over the source sentence (True = translated).
        return self._mask

    @mask.setter
    def mask(self, new_mask):
        self._mask = new_mask

    @property
    def parent_arcs(self):
        # ArcStack of (parent, score) back-pointers, or None for a base hypothesis.
        return self._parent_arcs

    @property
    def principal_parent(self):
        """
        @return: The parent hypothesis with the best score in the arc stack,
                    or None for a base hypothesis
        """
        if self._parent_arcs is None:
            return None
        else:
            # arcs are kept sorted best-first, so index 0 is the best parent
            return self._parent_arcs[0][0]

    @property
    def partial_score(self):
        # Accumulated log-probability score along the best arc.
        return self._partial_score

    @property
    def translation_target(self):
        # The target word produced by this hypothesis' last translation step.
        return self._translation_target

    @property
    def translation_index(self):
        # Source-sentence index of the last translated word.
        return self._translation_index

    def recombine(self, new_hypo):
        '''
        Recombine this hypothesis with new_hypo if applicable.
        On success, new_hypo's best arc is absorbed into this hypothesis'
        arc stack and this hypothesis keeps the better of the two scores.
        @param new_hypo: The new hypothesis to be tested and recombined.
        @type new_hypo: L{Hypothesis}
        @return: True if recombination is applicable, or False otherwise
        @rtype: C{bool}
        '''
        #Condition of recombination Check, translation status(i.e. the mask),
        # identical last translation(source word -> target word)
        # identical (n-1)-gram preceding the current target word
        if self._mask == new_hypo.mask and \
            self._translation_index == new_hypo.translation_index and \
            self.__recursive_check_LM(self.language_model.n - 1, self, new_hypo):
            #combine, push the arc to new_hypo's parent into self.parent_arcs
            # (new_hypo is freshly spawned, so it has exactly one arc at [0])
            self._parent_arcs.push(new_hypo.parent_arcs[0])

            # keep the better of the two partial scores
            self._partial_score = self._partial_score if self > new_hypo else\
             new_hypo._partial_score

            return True
        else:
            return False

    def __recursive_check_LM(self, depth, hypo1, hypo2):
        """
        Check if the two hypotheses share an identical (n-1)-gram preceding
        the current target word, by walking back through principal parents.
        """
        # Base case: ran out of depth, or one chain reached its base hypothesis.
        if depth == 0 or hypo1.principal_parent is None or\
         hypo2.principal_parent is None:
            # sanity check: both chains must end at the same depth, because
            # equal masks imply equal translation progress
            if hypo1.principal_parent is None:
                assert hypo2.principal_parent is None, "Bug 1"
            elif hypo2.principal_parent is None:
                assert hypo1.principal_parent is None, "Bug 1"
            return hypo1.translation_target == hypo2.translation_target
        else:
            # recursive case: current words match, then compare the parents
            return hypo1.translation_target == hypo2.translation_target and \
        self.__recursive_check_LM(depth - 1,
                                  hypo1.principal_parent, hypo2.principal_parent)

    def spawn_new_hypotheses(self):
        """
        Spawn child hypotheses based on this hypothesis (a generator).
        Candidate positions are the untranslated source words, restricted to
        those within reordering_limit_dist of the last translated position;
        if none are within the limit, all untranslated positions are used.
        This function also handles the partial score calculation by querying
        various models, namely LM, RM, TM.
        When all words are translated, no child hypotheses are spawned.
        NOTE(review): a source word absent from the translation model appears
        to raise a lookup error here rather than being skipped — confirm
        TranslationModel's behavior for unknown words.
        """
        choices_with_reord_limit = []
        choices = []

        # for every word in the source sentence
        for position, position_is_translated in enumerate(self.mask):
            # if the source word has not been translated
            if not position_is_translated:
                choices.append(position)
                # if the option is within the reordering limit
                if math.fabs(position - self._translation_index) < self.reordering_limit_dist:
                    choices_with_reord_limit.append(position)

        # fall back to all untranslated positions when none is within the limit
        for position in (choices if choices_with_reord_limit == [] else choices_with_reord_limit):
            now_translate = self.source_sentence[position]

            # Query the translation model, get all the possible translations of
            # this word and translation probabilities
            for target, tms in self.translation_model[now_translate].iteritems():

                # Get the reordering model score
                rms = self.reordering_model.score(self._translation_index, position)

                # Query the n-gram language model to get the language model score
                ngram = self.last_n_targets(self.language_model.n - 1) + [target]
                lms = self.language_model[tuple(ngram)]

                # Calculate the cost or score penalty of this translation option
                # as a weighted sum of log probabilities
                penalty = self.lambda_TM * math.log(tms) + \
                         self.lambda_RM * math.log(rms) + \
                         self.lambda_LM * math.log(lms)

                # Yield a new hypothesis with these data
                yield Hypothesis(self, position, target, penalty)

    def __cmp__(self, y):
        """
        Override comparison function to simplify higher-level code.
        Orders hypotheses by partial score; two hypotheses compare equal only
        when they are the same object (Python 2 only — __cmp__ is ignored
        in Python 3).
        """
        return 0 if self is y else (-1 if self._partial_score < y.partial_score else 1)

#    def __repr__(self):
#        return str(self._partial_score)

    def last_n_targets(self, n):
        """
        Trace back n hypotheses and return the last n target words as a list.
        """
        if n == 0 : #Recursion Base Case
            return []
        elif self._parent_arcs is None:
            if self._translation_target is None: #Empty Hypothesis:
                return []
            else:             #Base Hypothesis with a partial translation:
                # here _translation_target is the partial target sentence list
                return self._translation_target[-n:]
        else: #Recursive Case, Back Tracing
            return self.principal_parent.last_n_targets(n - 1) + [self.translation_target]

