import corpus
import allignments
import phrase_extractor as pe
from itertools import permutations


def part_in_part(a, b):
    """Return True when every whitespace-separated token of a also
    occurs among the tokens of b, False otherwise.

    An empty a is vacuously contained in anything (returns True).
    """
    tokens_b = b.split()
    return all(token in tokens_b for token in a.split())


def phrase_combined(target_phrase, phrases):
    """ returns true if target_phrase can be made from combinations of some
        phrases in phrases, false otherwise

        first tries to prune some stuff from the phrases to limit search space"""
    #target_phrase_splitted = phrase.split()

    candidate_phrases = set()

    for part_candidate in phrases:

        # conditions to check before considering it as a possibilty for a part:
        # * all words of f of candidate are found in the f of the target
        # * all words of e of candidate are found in the e of the target

        if part_in_part(part_candidate[0], target_phrase[0]) and \
                part_in_part(part_candidate[1], target_phrase[1]):
            candidate_phrases.add(part_candidate)


    # we can just quit in this case
    if len(candidate_phrases) == 0:
        return False

    print 'set: ', candidate_phrases, 'len: ', len(candidate_phrases)
    print 'target: ', target_phrase

    # try out all permutations in candidate_phrases (candidate_phrases is not that large)
    # (like we would have done without the pruning)

    max_phrases = len(target_phrase[0].split()) + len(target_phrase[1].split())

    # WARNING: Not necessary but increases speed for minimal loss:
    max_phrases = max(5, max_phrases)

    max_0 = len(target_phrase[0].split())
    max_1 = len(target_phrase[1].split())
    print 'max phrases: ', max_phrases, 'max 0 ', max_0, ' max 1 ', max_1

    candidate_phrases = list(candidate_phrases)
    for indices in permutations_up_to(range(len(candidate_phrases)), max_phrases):
        set_set = [candidate_phrases[index] for index in indices]

        zero = [x[0] for x in set_set]
        one = [x[1] for x in set_set]

        for i in permutations_up_to(zero, max_0):
            i = ' '.join(i)
            if i != target_phrase[0]:
                continue

            for j in permutations_up_to(one, max_1):
                j = ' '.join(j)
                if j == target_phrase[1]:
                    print ' aaaaaaaahhhhhhhhwwwwwwww yea'
                    return True

    return False

def permutations_up_to(l, m):
    """Yield every permutation of l of length 0 up to and including m.

    Lengths greater than len(l) contribute nothing, because
    itertools.permutations yields no tuples for r > len(l).
    """
    length = 0
    while length <= m:
        for perm in permutations(l, length):
            yield perm
        length += 1

def get_a_b(train_phrases, heldout_phrases):
    """Return (a_f, b_f): the fraction of heldout phrase pairs found
    verbatim in the train set (a_f), and the fraction that can instead
    be combined from several train-set pairs (b_f).

    train_phrases   -- container of (f, e) phrase pairs (set/dict-like)
    heldout_phrases -- iterable of (f, e) phrase pairs with a len()

    Returns (0.0, 0.0) for an empty heldout set instead of raising
    ZeroDivisionError (the original divided by len() unconditionally).
    """
    total = len(heldout_phrases)
    if total == 0:
        # nothing to evaluate; avoid dividing by zero below
        return 0.0, 0.0

    # calculate phrases in heldout which are in train set (a) or can be
    # made from phrases in the train set (b)
    a = 0
    b = 0
    for p_h in heldout_phrases:
        # phrase is just in the train set (completely)
        if p_h in train_phrases:
            a += 1
        # or phrase can be made from different phrases in the train set
        elif phrase_combined(p_h, train_phrases):
            b += 1

    total = float(total)
    a_f = a / total
    b_f = b / total

    return a_f, b_f

if __name__ == '__main__':
    # Build the training-side resources: the Dutch and English corpora,
    # their symmetrized word alignments, and a phrase extractor on top.
    corpus_nl_train = corpus.Corpus('../../data/lab2/training/p2_training.nl', 'nl')
    corpus_en_train = corpus.Corpus('../../data/lab2/training/p2_training.en', 'en')
    w_a_nl_en_train = allignments.Allignments('../../data/lab2/training/p2_training_symal.nlen', 'nl-en')
    phrase_extractor_train = pe.PhraseExtractor(corpus_nl_train, corpus_en_train,
        w_a_nl_en_train, 'train')


    # Same resources for the heldout set (used by the commented-out
    # get_a_b coverage experiment below).
    corpus_nl_heldout = corpus.Corpus('../../data/lab2/heldout/p2_heldout.nl', 'nl')
    corpus_en_heldout = corpus.Corpus('../../data/lab2/heldout/p2_heldout.en', 'en')
    w_a_nl_en_heldout = allignments.Allignments('../../data/lab2/heldout/p2_heldout_symal.nlen', 'nl-en')
    phrase_extractor_heldout = pe.PhraseExtractor(corpus_nl_heldout,
        corpus_en_heldout, w_a_nl_en_heldout, 'heldout')

    #print get_a_b(phrase_extractor_train.phrases.cache,
    #    phrase_extractor_heldout.phrases.cache)

    #e = 'finally , there is the lack of transparency .'
    #f = 'tot slot is er nog het gebrek aan transparantie .'

    #lm = phrase_extractor_train.compute_language_model(e, f)
    #cp = phrase_extractor_train.compute_conditional_probs(f, e)
    #jp = phrase_extractor_train.compute_joint_probability(e, f)
    #print 'lm: ', lm
    #print 'cp: ', cp
    #print 'jp: ', jp

    # Dump joint and conditional probabilities for the first 1000 cached
    # phrase pairs.  NOTE(review): keys()[:1000] assumes Python 2, where
    # dict.keys() returns a sliceable list; the selection is in whatever
    # (arbitrary) order the cache dict iterates.  p appears to be an
    # (f, e)-style pair given the index swap in the p_e_given_f call --
    # confirm against PhraseExtractor.
    for p in phrase_extractor_train.phrases.cache.keys()[:1000]:
        print 'phrase: %s' % str(p)
        print 'joint prob.: %s' % phrase_extractor_train.compute_joint_probability(p[0], p[1])
        print 'cond. prob. (p(f|e)): %s' % phrase_extractor_train.p_f_given_e(p[0], p[1])
        print 'cond. prob. (p(e|f)): %s' % phrase_extractor_train.p_e_given_f(p[1], p[0])
