import cache
import time
from decimal import Decimal


class PhraseExtractor(object):
    """Extracts phrase pairs from a word-aligned parallel NL/EN corpus.

    Builds a table of phrase-pair occurrence counts and, from it, both
    conditional translation-probability tables and (on demand) joint
    probabilities. All tables are memoised through ``cache.Cache`` so
    repeated runs reuse previously computed results.
    """

    def __init__(self, corpus_nl, corpus_en, word_allignments_nl_en, cat):
        """Build (or load from cache) all phrase tables.

        corpus_nl / corpus_en        -- objects exposing a ``.corpus`` list of
                                        token-list sentences, pairwise aligned.
        word_allignments_nl_en       -- object exposing ``.allignments``, one
                                        list of (nl_index, en_index) links per
                                        sentence pair.
        cat                          -- category label used in the phrase
                                        cache's file name.
        Raises AssertionError when the corpora/alignment lengths disagree.
        """
        assert len(corpus_nl.corpus) == len(corpus_en.corpus), 'corpora should be equal length'
        assert len(corpus_nl.corpus) == len(word_allignments_nl_en.allignments), 'word allignments should match corpora length'
        self.corpus_nl = corpus_nl
        self.corpus_en = corpus_en
        self.w_a_nl_en = word_allignments_nl_en
        # Persistent tables; each starts as an empty dict when no cache file exists.
        self.phrases = cache.Cache('phrases_nl_en_%s' % cat, dict())
        self.conditional_probs_nl_en = cache.Cache('conditional_probs_nl_en', dict())
        self.conditional_probs_en_nl = cache.Cache('conditional_probs_en_nl', dict())
        # NOTE(review): joint_probs is declared but never populated in this class.
        self.joint_probs = cache.Cache('joint_probs', dict())
        self.extract_phrases()
        self.extract_conditional_probs()
        # Grand total of phrase-pair occurrences; p(f) denominator in
        # compute_joint_probability.
        self.total_phrases_total = Decimal(sum(self.phrases.cache.values()))

    def extract_phrases(self):
        """Populate ``self.phrases.cache`` with phrase-pair occurrence counts.

        Skipped entirely when a non-empty table was loaded from disk.
        """
        if not self.phrases.cache:
            sentence_triples = zip(self.corpus_nl.corpus,
                                   self.corpus_en.corpus,
                                   self.w_a_nl_en.allignments)
            for s_nl, s_en, w_a in sentence_triples:
                for phrase in self.extract_sentence_phrases(s_nl, s_en, w_a):
                    self.phrases.cache[phrase] = self.phrases.cache.get(phrase, 0) + 1
            self.phrases.save()

    def extract_sentence_phrases(self, s_nl, s_en, w_a):
        """Return the list of (nl_phrase, en_phrase) pairs for one sentence.

        s_nl / s_en -- token lists for the sentence pair.
        w_a         -- alignment links for this sentence; each link is a
                       (nl_index, en_index) pair (indices may be strings).
        """
        sentence_phrases = []
        processed_allignments = []
        for i in range(len(w_a)):
            if w_a[i] in processed_allignments:
                continue
            processed_allignments.append(w_a[i])
            phrase = self.find_related_allignments(w_a, i)
            processed_allignments += phrase
            # Heuristic cut-off: we assume that a set of phrase allignments
            # larger than 10 will never result in a phrase pair of phrases
            # with length 4 or less.
            if len(phrase) <= 10:
                phrase_pair = self.construct_phrase_pair(s_nl, s_en, phrase)
                if len(phrase_pair[0].split()) <= 4 and len(phrase_pair[1].split()) <= 4:
                    sentence_phrases.append(phrase_pair)
        return sentence_phrases

    def construct_phrase_pair(self, s_nl, s_en, phrase):
        """Turn a group of alignment links into an (nl, en) phrase-string pair.

        Bug fix: iterate the de-duplicated word indices in *sorted* order so
        the words appear in sentence order — a bare ``set`` has no guaranteed
        iteration order.
        """
        nl_indices = sorted(set(int(nl) for (nl, en) in phrase))
        en_indices = sorted(set(int(en) for (nl, en) in phrase))
        phrase_nl = ' '.join(s_nl[i] for i in nl_indices)
        phrase_en = ' '.join(s_en[i] for i in en_indices)
        return (phrase_nl, phrase_en)

    def find_related_allignments(self, w_a, i):
        """Return all links transitively related to w_a[i] (sequential + mirror)."""
        sequential_relations = self.find_sequential_relations(w_a, i)
        mirror_relations = self.find_mirror_relations(sequential_relations, w_a)
        return sequential_relations + mirror_relations

    # sequential relations: (1, 2) related to (1, ?) and (?, 2)
    def find_sequential_relations(self, w_a, i):
        """Breadth-first closure of links sharing either index with w_a[i]."""
        related_allignments = []
        (left, right) = w_a[i]
        left_stack = [left]
        right_stack = [right]
        while left_stack or right_stack:
            if left_stack:
                left = left_stack.pop(0)
                # every not-yet-seen link whose NL side matches pulls its EN side in
                for relation in [a for a in w_a
                                 if a not in related_allignments and a[0] == left]:
                    related_allignments.append(relation)
                    right_stack.append(relation[1])
            if right_stack:
                right = right_stack.pop(0)
                # ... and symmetrically for the EN side
                for relation in [a for a in w_a
                                 if a not in related_allignments and a[1] == right]:
                    related_allignments.append(relation)
                    left_stack.append(relation[0])
        return related_allignments

    # mirror relations: (1, 2) related to (2, 1)
    def find_mirror_relations(self, sequential_relations, w_a):
        """Collect reversed links (b, a) that are themselves present in w_a."""
        mirror_relations = []
        for relation in sequential_relations:
            # NOTE(review): mirror is built as a *list*; the membership tests
            # below assume w_a's links are lists too — confirm upstream format.
            mirror = [relation[1], relation[0]]
            if mirror in w_a and mirror not in sequential_relations + mirror_relations:
                mirror_relations.append(mirror)
        return mirror_relations

    def extract_conditional_probs(self):
        """Fill both conditional-probability caches (once) and persist them."""
        if not self.conditional_probs_nl_en.cache and not self.conditional_probs_en_nl.cache:
            for (nl, en) in self.phrases.cache:
                self.conditional_probs_nl_en.cache[(nl, en)] = self.p_f_given_e(nl, en)
                self.conditional_probs_en_nl.cache[(en, nl)] = self.p_e_given_f(en, nl)
            self.conditional_probs_nl_en.save()
            self.conditional_probs_en_nl.save()

    def p_f_given_e(self, f, e):
        """Return count(f, e) / count(f, any e) as a Decimal (0 when undefined).

        NOTE(review): despite the name, the denominator counts every pair
        whose *first* (NL/f) side is ``f`` — i.e. under the usual convention
        this is p(e|f), and the original docstring's formula
        ``count(f, e)/count(any f, e)`` disagreed with the code. Behaviour
        kept exactly as-is because both direction tables are built from these
        two methods; confirm the intended direction against the callers.
        """
        return self._conditional_prob(0, f, 1, e)

    def p_e_given_f(self, e, f):
        """Return count(f, e) / count(any f, e) as a Decimal (0 when undefined).

        Mirror image of ``p_f_given_e``; the same direction caveat applies.
        Previously a verbatim duplicate of that method's body.
        """
        return self._conditional_prob(1, e, 0, f)

    def _conditional_prob(self, given_index, given_value, other_index, other_value):
        """Shared implementation for both conditional probabilities.

        Returns count(pairs matching both sides) / count(pairs whose
        ``given_index`` side equals ``given_value``) as a Decimal, or the
        int 0 when either count is zero (matching the original's behaviour).
        """
        matching_given = [(pair, count) for pair, count in self.phrases.cache.items()
                          if pair[given_index] == given_value]
        if not matching_given:
            return 0
        joint_count = sum(count for pair, count in matching_given
                          if pair[other_index] == other_value)
        if joint_count == 0:
            return 0
        given_count = Decimal(sum(count for _, count in matching_given))
        return Decimal(joint_count) / given_count

    def compute_joint_probability(self, f, e):
        """Return p(e, f) = p(e|f) * p(f), computed from the raw phrase counts.

        Bug fix: returns Decimal(0) for an ``f`` that never occurs — the
        original raised ZeroDivisionError in that case.
        """
        f_phrases = [(pair, count) for pair, count in self.phrases.cache.items()
                     if pair[0] == f]
        f_count = Decimal(sum(count for _, count in f_phrases))
        if f_count == 0:
            # Unseen source phrase: the joint probability is zero by definition.
            return Decimal(0)
        p_f = f_count / self.total_phrases_total
        f_e_count = sum(count for pair, count in f_phrases if pair[1] == e)
        p_e_given_f = f_e_count / f_count
        return Decimal(p_f * p_e_given_f)
