#!/usr/bin/env python
'''
Refer to em_for_ibm_model_1.rst for overall documentation

wilzzha, guiguan
'''

from collections import defaultdict
from AlignedSent import AlignedSent
from load import load
import sys

# Uniform initial value for t(e|f).  Rebound in __main__ once the English
# vocabulary size is known; TableCell.__init__ reads this module global at
# instance-creation time, so lazily created cells pick up the real value.
init_t = 0
# An entry of the t(e|f) table counts as converged when |delta t| <= this.
convergent_threshold = 5*1e-2
# Pseudo source word every English word may align to (IBM Model 1 NULL word).
NULL_TOKEN = "#NULL"
# There should be a function f that relates threshold to input size (to be determined)


class TableCell(object):
    """One entry of the t(e|f) table: the probability and its EM count.

    Attributes:
        t     -- current translation probability t(e|f)
        count -- fractional count accumulated during the current E-step
    """

    def __init__(self):
        # NOTE: init_t is a module global rebound before EM starts, and it is
        # looked up here at instance-creation time — cells created lazily by
        # the defaultdict therefore start at the uniform probability.
        self.t = init_t
        self.count = 0

    def clearCount(self):
        """Drop the accumulated fractional count for the next EM iteration."""
        self.count = 0

    def updateT(self, total_f):
        """M-step update: set t = count / total_f.

        Returns the absolute change in t, used as the convergence measure.
        """
        previous = self.t
        self.t = self.count / total_f
        return abs(self.t - previous)
        

if __name__ == "__main__":
    # The collection of alignedSent should be provided by other team
    aligned_sents = load('../../data/giza/alignment-en-fr', 'latin_1')

    # generate word sets from corpus
    e = set()
    f = set()
    for aligned_sent in aligned_sents:
        for e_w in aligned_sent.sent1:
            e.add(e_w)
        for f_w in aligned_sent.sent2:
            f.add(f_w)
    f.add(NULL_TOKEN)

    entry_count = len(e) * len(f)
    e_given_f = defaultdict(TableCell)
    total = defaultdict(float)
    s_total = defaultdict(float)
    
    # initialize t(e|f) uniformly
        
    init_t = float(1) / len(e)
    
    global_converged = False
    iteration_count = 0

    while not global_converged:
        iteration_count += 1
        
        for aligned_sent in aligned_sents:
            for e_w in aligned_sent.sent1:
                s_total[e_w] += e_given_f[(e_w, NULL_TOKEN)].t
                for f_w in aligned_sent.sent2:
                    s_total[e_w] += e_given_f[(e_w, f_w)].t

            # collect counts
            for e_w in aligned_sent.sent1:
                cur_e_given_f = e_given_f[(e_w, NULL_TOKEN)]
                tmp = cur_e_given_f.t / s_total[e_w]
                cur_e_given_f.count += tmp
                total[NULL_TOKEN] += tmp
                for f_w in aligned_sent.sent2:
                    cur_e_given_f = e_given_f[(e_w, f_w)]
                    tmp = cur_e_given_f.t / s_total[e_w]
                    cur_e_given_f.count += tmp
                    total[f_w] += tmp

        global_converged = True
        conv_count = 0
        for f_w in f:
            for e_w in e:
                cur_e_given_f = e_given_f[(e_w, f_w)]
                diff = cur_e_given_f.updateT(total[f_w])
                entry_converged = diff <= convergent_threshold
                global_converged = global_converged and entry_converged
                conv_count += entry_converged
                cur_e_given_f.clearCount();
                
            total[f_w] = 0
        print "Converging... %d/%d (%5.4f%%)\r" % \
                (conv_count, entry_count, conv_count * 100.0 / entry_count),
        sys.stdout.flush() # this must be flushed to see the latest result        
    print

    # t(e|f) Dump
    for k, v in e_given_f.iteritems():
        if v.t > 0.75:
            print "t(%s|%s) = %.16f".encode('utf-8') % (k[0], k[1], v.t)
    
    print "===================================================="
    print "Terminated after %d iterations." % (iteration_count)
    print
    
    ###################################################
    # Alignment Learning from t(e|f)
    # Only a few test cases.
    for aligned_sent in aligned_sents:
        # Dump Sentences
        print ' '.join(aligned_sent.sent1).encode('utf-8')
        print ' '.join(aligned_sent.sent2).encode('utf-8')
        # for every English word
        for j,e_w in enumerate(aligned_sent.sent1):
            # find the French word that gives maximised t(e|f)
            # NULL_TOKEN as initial candidate
            f_index, t_max = -1, e_given_f[(e_w,NULL_TOKEN)].t
            
            for i,f_w in enumerate(aligned_sent.sent2):
                t = e_given_f[(e_w,f_w)].t
                if t > t_max:
                    f_index, t_max = i, t
            # dump
            if f_index != -1:
                print "%d-%d" % (j,f_index),
        print

