#    Copyright (c) 2010 Abhishek Patnia (patnia@isi.edu) and Tarang Desai (tarangde@usc.edu)
#
#    Permission is hereby granted, free of charge, to any person obtaining a copy
#    of this software and associated documentation files (the "Software"), to deal
#    in the Software without restriction, including without limitation the rights
#    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#    copies of the Software, and to permit persons to whom the Software is
#    furnished to do so, subject to the following conditions:
#
#    The above copyright notice and this permission notice shall be included in
#    all copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#    THE SOFTWARE.

from __future__ import division
from collections import defaultdict
import gflags as flags
import sys

FLAGS=flags.FLAGS  # global gflags registry; populated from sys.argv in the __main__ block

# global constants
GIVEN = '->'  # separator used in transition-probability keys: 'hindi_word' + GIVEN + 'english_word'
NULL = 'NULL'  # artificial NULL token prepended to every English sentence (IBM Model 1 convention)


def createCorpusInitT(eng_fn, hindi_fn):
    """
        1) Create a corpus by reading parallel sentences from the given
           English and Hindi filenames (line i of each file is a pair)
        2) Initialize transition probabilities uniformly

        Args:
            eng_fn:   English-side filename, one sentence per line
            hindi_fn: Hindi-side filename, one sentence per line, parallel
                      line-by-line with the English file

        Returns:
            (corpus, T) where corpus is a list of (english, hindi) token-list
            pairs (English sentences start with the NULL token) and T holds
            the initial transition probabilities.
    """
    # Build the English side first; each sentence gets the artificial NULL
    # word so Hindi words may align to "nothing".  'with' guarantees the
    # file handles are closed (the original leaked them).
    corpus = []
    with open(eng_fn) as eng_file:
        for line in eng_file:
            # split() with no argument drops empty tokens produced by runs
            # of whitespace, replacing the manual len() > 0 filter
            corpus.append([NULL] + line.split())

    # Pair each Hindi sentence with the English sentence on the same line.
    with open(hindi_fn) as hindi_file:
        for i, line in enumerate(hindi_file):
            corpus[i] = (corpus[i], line.split())

    # return the corpus and initialized transition probabilities
    return (corpus, initTProbs(corpus))

def initTProbs(corpus):
    """
        Initialize transition probabilities t(hindi|english) to a uniform value.

        Args:
            corpus: list of (english, hindi) sentence pairs (token lists)

        Returns:
            defaultdict(float) mapping 'hindi->english' keys to 1/len(hindi)
            of the first sentence pair in which that word pair occurs.
    """
    T = defaultdict(float)  # transition probabilities
    for (english, hindi) in corpus:
        for hindi_word in hindi:
            for eng_word in english:
                key = hindi_word + GIVEN + eng_word  # hoist repeated key construction
                # Membership test instead of `T[key] == 0`: the original
                # needlessly inserted a 0.0 entry into the defaultdict just
                # to overwrite it.  First occurrence still wins, as before.
                if key not in T:
                    T[key] = 1 / len(hindi)
    return T


def collectCounts(hindi, english, T, counts):
    """
        Collect fractional (expected) counts for the given hindi and english
        sentences — the E-step of IBM Model 1.  Each Hindi word distributes
        one unit of probability mass over all English words, in proportion
        to the current transition probabilities.

        Args:
            hindi:   Hindi sentence as a list of tokens
            english: English sentence as a list of tokens (includes NULL)
            T:       current transition probabilities, keyed 'hindi->english'
            counts:  defaultdict(float) of fractional counts, updated in place
    """
    for hindi_word in hindi:
        # Normalizer: total probability of this Hindi word over all English
        # candidates.  (Renamed from `sum`, which shadowed the builtin.)
        denom = 0.0
        for eng_word in english:
            denom += T[hindi_word + GIVEN + eng_word]
        for eng_word in english:
            counts[hindi_word + GIVEN + eng_word] += T[hindi_word + GIVEN + eng_word] / denom

def normalizeCounts(counts, T):
    """
        Normalize the collected fractional counts per English word and store
        the results as the new transition probabilities — the M-step of IBM
        Model 1.

        Args:
            counts: defaultdict(float) of fractional counts, keyed 'hindi->english'
            T:      transition probabilities, updated in place
    """
    # First pass: total fractional count of each English word (normalizer).
    totals = defaultdict(float)
    for pair_key, frac in counts.items():
        totals[pair_key.split(GIVEN)[1]] += frac

    # Second pass: each pair's share of its English word's total becomes
    # the updated transition probability.
    for pair_key, frac in counts.items():
        T[pair_key] = frac / totals[pair_key.split(GIVEN)[1]]


def EM(corpus, T,  MAX_ITERATIONS):
    """
        Run EM for the given corpus, initial transition probabilities T, and
        maximum number of iterations, refining T in place.
    """
    print ('********* IBM Model 1 **********\n')
    for round_num in range(MAX_ITERATIONS):
        print ('********* ITERATION ********** -> %s\n' % (round_num + 1))
        # E-step: fresh fractional counts, accumulated over every pair
        fractional = defaultdict(float)
        for (english, hindi) in corpus:
            collectCounts(hindi, english, T, fractional)
        # M-step: renormalize the counts into updated probabilities
        normalizeCounts(fractional, T)

if __name__ == '__main__':

    # Flags
    # English data files
    flags.DEFINE_string('engTrainFN', 'data/training/acl2005englishV1.txt', 'English training data filename')
    # Hindi data files
    flags.DEFINE_string('hindTrainFN', 'data/training/acl2005hindiV1.txt', 'Hindi training data filename')
    # output file
    flags.DEFINE_string('output', 'models/transition.model1.prob', 'Transition probabilities output filename')
    # number of iterations
    flags.DEFINE_integer('iterations', 10, 'Number of iterations to run EM')

    # initialize flags from command line
    FLAGS(sys.argv)

    corpus, T = createCorpusInitT(FLAGS.engTrainFN, FLAGS.hindTrainFN)
    EM(corpus, T, FLAGS.iterations)
    # 'with' guarantees the model file is flushed and closed (the original
    # never closed it, and the name `file` shadowed the builtin).
    with open(FLAGS.output, 'w') as model_file:
        for key, item in T.items():
            model_file.write('%s=%s\n' % (key, str(item)))