from collections import defaultdict
from copy import deepcopy
from random import random
from cache import Cache
import corpus

import re

class Ibm1(object):
    """IBM Model 1 translation/alignment model with EM training.

    Notation (following Brown et al. 1993 and Collins' notes):
    * l, length of english sentence
    * m, length of french sentence
    """

    def __init__(self, corpus_f, corpus_e, epsilon, verbose=False):
        """Build the model over a sentence-aligned parallel corpus.

        corpus_f -- French side, a corpus.Corpus (has .corpus, a list of
                    tokenised sentences)
        corpus_e -- English side, same shape as corpus_f
        epsilon  -- p(m|e), the length-model constant of Model 1
        verbose  -- print progress during em_train()
        """
        assert len(corpus_f.corpus) == len(corpus_e.corpus), 'corpora should be equal length'

        self.corpus_f = corpus_f
        self.corpus_e = corpus_e
        self.epsilon = float(epsilon)  # <- force float for float division later
        self.verbose = verbose

        # t(f|e), keyed on (french_word, english_word), initialised randomly;
        # the Cache persists it between runs.
        # NOTE(review): after em_train() this is replaced by a *plain* dict,
        # so lookups of unseen (f, e) pairs raise KeyError from then on.
        self.tfe = Cache('tfe', defaultdict(lambda: int(random() \
            * len(self.corpus_f.corpus))))

    def get_e(self, f):
        """Return (e, p): the training English sentence maximising
        P(e) * P(f|e), and that score.

        We simply arg-max over the training sentences (as the assignment
        suggests) instead of generating candidate sentences from scratch.
        """
        m_e, m_p = '', 0
        for candidate in self.corpus_e.corpus:
            p = self.get_p_e(candidate) * self.get_p_f_given_e(f, candidate)
            if p > m_p:
                m_e, m_p = candidate, p
        return m_e, m_p

    def get_viterbi_alignment(self, f, e):
        """Return the Viterbi alignment of French sentence f to English
        sentence e (the caller is expected to put NULL at position 0).

        For each French position i we choose a_i so as to make
        t(f_i|e_{a_i}) * q(a_i|i, m, l) as large as possible; under Model 1
        q is the uniform 1/(l+1), so it does not affect the argmax.

        Returns a dict: english position -> list of 0-based French indices
        aligned to it (the caller converts these to 1-based for printing).
        """
        l = len(e)
        m = len(f)

        alignment = []

        # best english position for every french word
        # (BUGFIX: the loop used to start at 1, so f[0] was never aligned)
        for i in range(m):
            p_m, a_i = 0, -1

            # we align it to english position j
            for j in range(l):
                # Model 1: q(j|i,l,m) is the uniform 1/(l+1)
                p = self.tfe.cache[f[i], e[j]] * (1. / (l + 1))

                # which maximizes the product above
                if p > p_m:
                    p_m, a_i = p, j
            alignment.append(a_i)

        # invert: english position -> list of french indices aligned to it
        al = {}
        for i, a in enumerate(alignment):
            if a not in al:
                al[a] = []
            al[a].append(i)
        return al

    def get_p_f_given_e(self, f, e):
        """Return P(f|e) under Model 1:

            $\\frac{epsilon}{(l+1)^m} \\prod^{m}_{j=1} \\sum^{l}_{i=0}t(f_j|e_i)$

        where position 0 is the NULL word.  See page 272 of 'The mathematics
        of statistical machine translation: parameter estimation'
        (p263-brown.pdf) for the specifics.
        """
        # prepend the NULL word; afterwards len(e) == l + 1
        e = ['NULL'] + e

        l = len(e)  # already counts NULL, i.e. this is (original l) + 1
        m = len(f)

        # epsilon / (l+1)^m -- since l here already includes NULL the
        # divisor is l**m (BUGFIX: was (1+l)**m, double-counting NULL)
        part_a = self.epsilon / (l ** m)

        part_b = 1.
        # product over all m French words (BUGFIX: xrange(1, m) skipped f[0])
        for j in range(m):
            part_c = 0.
            for i in range(l):
                # unseen (f, e) pairs after training raise KeyError: assign
                # probability 0 (alternatively we could smooth, but we won't)
                try:
                    part_c += self.get_t_f_e(f[j], e[i])  # t(f_j|e_i)
                except KeyError:
                    return 0.
            part_b *= part_c

        return part_a * part_b

    def get_t_f_e(self, f_word, e_word):
        """t(f|e), f from F, e from E union {NULL}: the conditional
        probability of generating French word f from English word e.

        t_ml(f|e) = c(e, f) / c(e)
        c(e, f) is the number of times word e is aligned to word f in the
        training data; c(e) is the total number of times e is aligned to
        any french word.
        """
        return self.tfe.cache[f_word, e_word]

    def get_p_e(self, e):
        """Return P(e) under an unsmoothed trigram language model built
        from the English corpus counts."""
        p_e = float(1)
        e = corpus.Corpus.add_sentence_delimiters(e)
        n = 3
        for i in range(0, len(e) - (n - 1)):
            trigram = ' '.join(map(lambda x: str(x), e[i:i + n]))
            bigram = ' '.join(map(lambda x: str(x), e[i:i + n - 1]))

            trigram_count = float(self.corpus_e.trigram.cache.get(trigram, 0))
            bigram_count = float(self.corpus_e.bigram.cache.get(bigram, 0))

            # unseen n-gram: the unsmoothed model assigns probability zero
            if bigram_count == 0 or trigram_count == 0:
                return 0

            p_e *= (trigram_count / bigram_count)
        return p_e

    def em_train(self):
        """EM training of t(f|e), implemented according to 'Statistical
        Machine Translation: IBM Models 1 and 2' by Michael Collins.

        Expected counts collected per iteration:
        c(e, f) -- number of times word e is aligned to f
        c(e)    -- number of times e is aligned to any f
        """
        if self.verbose:
            print('Start of EM train')

        # fixed number of iterations (should really stop on convergence)
        for s in range(20):

            if self.verbose:
                print('Iteration: %d' % s)

            # set all expected counts to zero
            cef = defaultdict(float)
            ce = defaultdict(float)

            for k, french_sentence in enumerate(self.corpus_f.corpus):
                # loop invariants, hoisted out of the per-word loop
                m = len(french_sentence)
                l = len(self.corpus_e.corpus[k])
                # the English side gets the NULL word at position 0
                e_k = ['NULL'] + self.corpus_e.corpus[k]

                for i, f_i in enumerate(french_sentence):
                    for j, e_j in enumerate(e_k):

                        # posterior probability that f_i aligns to e_j,
                        # calculated once
                        delta_k_i_j = self.delta(k, i, j, f_i, e_j, l, m)

                        cef[e_j, f_i] += delta_k_i_j
                        ce[e_j] += delta_k_i_j

            # M-step: t(f|e) = c(e, f) / c(e)
            self.tfe.cache = Ibm1.update_dicts_1(cef, ce)

            # persist the estimates
            self.tfe.save()

    def delta(self, k, i, j, f_i, e_j, l, m):
        """Posterior probability that French word i of sentence pair k is
        aligned to English word j (E-step quantity of Collins' Model 1).

        k   -- index of the sentence pair in the corpus
        i   -- position in the k-th french sentence
        j   -- position in the k-th english sentence (0 = NULL)
        f_i -- i-th french word of the k-th french sentence
        e_j -- j-th english word of the k-th english sentence
        l   -- length of the k-th english sentence (without NULL)
        m   -- length of the k-th french sentence
        """
        numerator = self.tfe.cache[f_i, e_j]

        # normalise over every English position INCLUDING NULL: the
        # numerator's e_j ranges over ['NULL'] + sentence, so the
        # denominator must too (BUGFIX: NULL was previously left out)
        denominator = 0.
        for e_word in ['NULL'] + self.corpus_e.corpus[k]:
            denominator += self.tfe.cache[f_i, e_word]

        # all-zero column: contribute nothing instead of dividing by zero
        if denominator == 0:
            return 0.

        return numerator / float(denominator)

    @staticmethod
    def update_dicts_1(cef, ce):
        """M-step: build t(f|e) = c(e, f) / c(e) from expected counts.

        cef is keyed (english, french); the result is keyed (french,
        english), matching every tfe.cache lookup in this class.
        """
        tfe = {}
        for e, f in cef:
            tfe[f, e] = cef[e, f] / float(ce[e])
        return tfe


def precision_recall(comp, gold):
    """Compare an alignment file against the gold standard.

    comp -- path to the produced alignment file
    gold -- path to the gold-standard alignment file

    Both files use the GIZA-style three-lines-per-sentence format; only
    the alignment lines are compared (every third line plus line 2,
    skipping the very first match -- same selection as before).

    Returns (precision, recall), each averaged over the sentence pairs.
    Raises AssertionError when the two files select a different number of
    alignment lines.
    """

    def _alignment_lines(path):
        # pick the alignment lines out of the three-line record format
        with open(path) as handle:
            lines = handle.readlines()
        picked = [line for idx, line in enumerate(lines)
                  if (idx + 1) % 3 == 0 or idx == 1]
        return picked[1:]

    comp_lines = _alignment_lines(comp)
    gold_lines = _alignment_lines(gold)

    assert len(gold_lines) == len(comp_lines), 'input files should be equal length'

    # guard: nothing to average over (avoids ZeroDivisionError below)
    if not comp_lines:
        return 0., 0.

    precision = 0.
    recall = 0.

    for comp_line, gold_line in zip(comp_lines, gold_lines):

        comp_hits = list_of_hits_from_string(comp_line)
        gold_hits = list_of_hits_from_string(gold_line)

        gold_total = 0  # alignment links in the gold standard
        correct = 0     # proposed links that appear in the gold standard
        proposed = 0    # alignment links we proposed
        for c_hit, g_hit in zip(comp_hits, gold_hits):
            gold_total += len(g_hit)
            correct += len([x for x in c_hit if x in g_hit])
            proposed += len(c_hit)

        precision += float(correct) / proposed if proposed != 0 else 0
        recall += float(correct) / gold_total if gold_total != 0 else 0

    return precision / len(comp_lines), recall / len(gold_lines)

def list_of_hits_from_string(s, r1=re.compile(r'\({[ 0-9]+'),
        r2=re.compile(r'[0-9]+')):
    """Extract the alignment position lists from one GIZA-style line.

    s  -- a line like 'the ({ 1 }) house ({ 2 10 })'
    r1 -- pattern matching each '({ ...' group (precompiled once as a
          default argument so it is not rebuilt per call)
    r2 -- pattern extracting the position numbers from a matched group

    Returns one list of position strings per group, e.g.
    [['1'], ['2', '10']].

    BUGFIX: the character classes used to be '[ 1-9]' and '[0-9]', which
    (a) truncated a group at the first '0' and (b) split multi-digit
    positions such as '10' into separate digits.
    """
    return [r2.findall(group) for group in r1.findall(s)]

if __name__ == '__main__':

    # Evaluate a previously produced alignment file against the gold
    # standard, then stop.  NOTE(review): everything below the exit() is
    # intentionally unreachable -- remove these two lines to regenerate
    # the alignments instead.
    print 'p, r', precision_recall('alignments', 'alignments_gold')
    exit()


    # parallel corpora: Dutch (source/"french" side) and English (target)
    corpus_nl = corpus.Corpus('../../data/lab1/corpus1000.nl', 'nl')
    corpus_en = corpus.Corpus('../../data/lab1/corpus1000.en', 'en')

    # epsilon = p(m|e) = 0.1
    ibm1 = Ibm1(corpus_nl, corpus_en, 0.1, verbose=True)

    # uncomment for training
    #ibm1.em_train()
    #exit()

    # testing some sentence
    #for i, nl in enumerate(corpus_nl.corpus):
    #    print ibm1.get_p_f_given_e(nl, corpus_en.corpus[i])

    # align every Dutch sentence to its best English training sentence and
    # print the result in the GIZA-style format read by precision_recall
    for i, nl in enumerate(corpus_nl.corpus):
        # best English sentence and its P(e) * P(f|e) score
        e, p = ibm1.get_e(nl)

        # position 0 is the NULL word, as get_viterbi_alignment expects
        e = ['NULL'] + e

        # ADD NULL HERE?
        alignments = ibm1.get_viterbi_alignment(nl, e)

        # three-line record: header, source sentence, aligned target words
        print '# sentence pair (%d) source length %d target length %d alignment score : %s' %\
            (i + 1, len(nl), len(e), p)
        print ' '.join(map(lambda x: str(x), nl))
        # NOTE(review): the loop variable deliberately shadows the
        # sentence list 'e'; each english word is printed with the french
        # positions aligned to it (x + 1 converts the stored french index
        # to a 1-based position -- confirm against the gold file format)
        for j, e in enumerate(e):
            x = alignments.get(j, '{ }')
            if type(x) != str:
                x = '{ %s }' % ' '.join(map(lambda x: str(x + 1), x))
            # trailing comma: Python 2 print, keeps the words on one line
            print '%s (%s) ' % (e, x),
        print
