#    Copyright (c) 2010 Abhishek Patnia (patnia@isi.edu) and Tarang Desai (tarangde@usc.edu)
#
#    Permission is hereby granted, free of charge, to any person obtaining a copy
#    of this software and associated documentation files (the "Software"), to deal
#    in the Software without restriction, including without limitation the rights
#    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#    copies of the Software, and to permit persons to whom the Software is
#    furnished to do so, subject to the following conditions:
#
#    The above copyright notice and this permission notice shall be included in
#    all copies or substantial portions of the Software.
#
#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#    THE SOFTWARE.

from __future__ import division
from collections import defaultdict
import gflags as flags
import sys

# Handle to the gflags registry; populated from sys.argv in __main__.
FLAGS=flags.FLAGS

# global constants
GIVEN = '->'    # separator of a conditional key, e.g. 'hindiWord->englishWord'
NULL = 'NULL'   # artificial NULL token prepended to every English sentence
SEP = ":"       # separator between position/length fields of a distortion key

def createCorpus(eng_fn, hindi_fn):
    """
    Build the parallel training corpus and the initial reverse distortion table.

    Reads one sentence per line from each file.  Every English sentence is
    prefixed with the artificial NULL token so Hindi words may align to
    nothing.  The two files are assumed to be line-aligned;
    NOTE(review): if the Hindi file has more lines than the English file this
    raises IndexError, and extra English lines stay unpaired -- original
    behavior, kept as-is.

    Args:
        eng_fn: path to the English sentences file (space-separated tokens).
        hindi_fn: path to the Hindi sentences file (space-separated tokens).

    Returns:
        (corpus, revD) where corpus is a list of (english_words, hindi_words)
        pairs and revD maps distortion keys to uniform initial probabilities.
    """
    corpus = []
    # English side.  'with' closes the handle deterministically -- the
    # original leaked both file handles.  (The original also tracked
    # eng_max_len / hind_max_len but never used them; dropped.)
    with open(eng_fn) as eng_file:
        for line in eng_file:
            eng_sent = [NULL] + [w for w in line.strip().split(' ') if w]
            corpus.append(eng_sent)

    # Hindi side: pair each Hindi sentence with its English counterpart.
    with open(hindi_fn) as hindi_file:
        for i, line in enumerate(hindi_file):
            hind_sent = [w for w in line.strip().split(' ') if w]
            corpus[i] = (corpus[i], hind_sent)

    # return the corpus and initialized reverse distortion probabilities
    return (corpus, initRevereseDistortionProbs(corpus))

def initRevereseDistortionProbs(corpus):
    """
    Seed the reverse distortion table with a uniform distribution.

    For every sentence pair, each Hindi position i gets a key per English
    position j of the form 'j->i:l:m' (l = English length without NULL,
    m = Hindi length), all set to 1/len(english_sentence).
    NOTE(review): the inner range yields one more English position than
    collectCounts ever queries -- preserved from the original.

    Returns:
        defaultdict(float) of distortion key -> uniform probability.
    """
    revD = defaultdict(float)
    for eng_sent, hind_sent in corpus:
        hind_limit = len(hind_sent) + 1
        eng_limit = len(eng_sent) + 1
        uniform = 1.0 / (eng_limit - 1)
        for hind_pos in range(1, hind_limit):
            # the conditioned part 'i:l:m' is shared by every English position
            suffix = str(hind_pos) + SEP + str(eng_limit - 2) + SEP + str(hind_limit - 1)
            for eng_pos in range(eng_limit):
                key = str(eng_pos) + GIVEN + suffix
                if revD[key] == 0:
                    revD[key] = uniform
    return revD


def collectCounts(hindi, english, T, revD, countsT, countTFinal, countsRevereseDistortion, countsReverseDistortionFinal):
    """
    Accumulate fractional (expected) counts for one sentence pair (E-step).

    For every Hindi word, alignment mass over all English positions is
    normalized so the fractional counts per Hindi word sum to one, then
    spread into the four accumulator dictionaries (all mutated in place).
    NOTE(review): if every T * revD product for a Hindi word is zero this
    divides by zero -- same as the original.
    """
    eng_len_field = len(english) - 1
    hind_len_field = len(hindi)
    for hind_idx, hind_word in enumerate(hindi):
        # conditioned part 'i:l:m' is identical for every English position
        pos_key = str(hind_idx + 1) + SEP + str(eng_len_field) + SEP + str(hind_len_field)
        # pass 1: normalizer over all English candidates (incl. NULL)
        denom = 0.0
        for eng_idx, eng_word in enumerate(english):
            denom += T[hind_word + GIVEN + eng_word] * revD[str(eng_idx) + GIVEN + pos_key]
        # pass 2: distribute the normalized fractional counts
        for eng_idx, eng_word in enumerate(english):
            dist_key = str(eng_idx) + GIVEN + pos_key
            trans_key = hind_word + GIVEN + eng_word
            frac = T[trans_key] * revD[dist_key] / denom
            countsT[trans_key] += frac
            countTFinal[eng_word] += frac
            countsRevereseDistortion[dist_key] += frac
            countsReverseDistortionFinal[pos_key] += frac

def smoothDistortionCounts(countsReverseDistortion, countsReverseDistortionFinal):
    """
    Apply additive (Laplace-style) smoothing to the distortion counts.

    The additive constant is half of the smallest positive count observed
    (capped at 1.0), so smoothing never dominates real counts.  Both
    dictionaries are modified in place.
    """
    laplace = 1.0
    # find the smallest positive count
    for value in countsReverseDistortion.values():
        if 0 < value < laplace:
            laplace = value
    laplace *= 0.5
    # FIX: the original used the Python 2 statement form 'print laplace',
    # a syntax error under Python 3 and inconsistent with the rest of the
    # file, which already calls print with parentheses.
    print(laplace)
    # add the constant to every individual distortion count
    for key in countsReverseDistortion.keys():
        countsReverseDistortion[key] += laplace
    # An aggregate key 'i:l:m' is the sum over English positions j = 0..l,
    # i.e. l + 1 individual keys (see collectCounts), so it must grow by
    # laplace * (l + 1) to keep the smoothed distribution normalized.
    # FIX: the original multiplied by the Hindi length m (the last field),
    # which breaks sum-to-one normalization whenever m != l + 1.
    for key in countsReverseDistortionFinal.keys():
        num_eng_positions = int(key.split(SEP)[1]) + 1
        countsReverseDistortionFinal[key] += laplace * num_eng_positions

def normalizeCounts(countsT, countTFinal, countsReverseDistortion, countsReverseDistortionFinal,  T, revD):
    """
    Turn accumulated fractional counts back into probabilities (M-step).

    Each transition count is divided by the total count of its English
    word; each distortion count by the total of its conditioned 'i:l:m'
    part.  T and revD are updated in place.
    """
    for trans_key, count in countsT.items():
        T[trans_key] = count / countTFinal[trans_key.split(GIVEN)[1]]
    for dist_key, count in countsReverseDistortion.items():
        revD[dist_key] = count / countsReverseDistortionFinal[dist_key.split(GIVEN)[1]]


def EM(corpus, T, revD, MAX_ITERATIONS):
    """
    Train IBM Model 2 parameters with Expectation-Maximization.

    Each iteration collects fractional counts over the whole corpus,
    smooths the distortion counts, then renormalizes into T and revD.
    T and revD are updated in place; nothing is returned.
    """
    print('********* IBM Model 2 **********\n')
    for iteration in range(MAX_ITERATIONS):
        print('********* ITERATION ********** -> %s\n' % (iteration + 1))
        # fresh count accumulators every iteration
        countsT = defaultdict(float)                        # transition counts
        countTFinal = defaultdict(float)                    # transition denominators
        countsReverseDistortion = defaultdict(float)        # distortion counts
        countsReverseDistortionFinal = defaultdict(float)   # distortion denominators
        # E-step: fractional counts for every sentence pair
        for english, hindi in corpus:
            collectCounts(hindi, english, T, revD, countsT, countTFinal, countsReverseDistortion, countsReverseDistortionFinal)
        # smooth distortion counts before normalizing
        smoothDistortionCounts(countsReverseDistortion, countsReverseDistortionFinal)
        # M-step: normalize counts into updated probabilities
        normalizeCounts(countsT, countTFinal, countsReverseDistortion, countsReverseDistortionFinal,  T, revD)

def readProbability(probs_fn):
    """
    Load a probability table from a file with one 'key=value' per line.

    Args:
        probs_fn: path to the probabilities file.

    Returns:
        defaultdict(float) mapping key -> probability (unseen keys yield 0.0).

    Raises:
        ValueError: if a line does not contain exactly one '='.
        IOError/OSError: if the file cannot be opened.
    """
    probDict = defaultdict(float)
    # 'with' closes the handle deterministically -- the original leaked it
    with open(probs_fn) as probs_file:
        for line in probs_file:
            key, prob = line.strip().split('=')
            probDict[key] = float(prob)
    return probDict

if __name__ == '__main__':

    # Flags
    # English data files
    flags.DEFINE_string('engTrainFN', 'data/training/acl2005englishV1.txt', 'English training data filename')
    # Hindi data files
    flags.DEFINE_string('hindTrainFN', 'data/training/acl2005hindiV1.txt', 'Hindi training data filename')

    # Input transitions probabilities
    flags.DEFINE_string('input', 'models/transition.model1.prob', 'Model 1 transition probabilities')
    # Output probabilities
    flags.DEFINE_string('outputT', 'models/transition.model2.prob', 'Transition probabilities output filename for Model 2')
    flags.DEFINE_string('outputRevD', 'models/reverseDistortion.model2.prob', 'Reverse distortion probabilities output filename')
    # Number of transitions
    flags.DEFINE_integer('iterations', 4, 'Number of iterations to run EM')

    # initialize flags from command line
    FLAGS(sys.argv)

    corpus, revD = createCorpus(FLAGS.engTrainFN, FLAGS.hindTrainFN)
    T = readProbability(FLAGS.input)  # use the transition probabilities from model 1
    EM(corpus, T, revD, FLAGS.iterations)

    # Write the learned tables in 'key=value' format.  FIX: the original
    # never closed (or explicitly flushed) either output file; 'with'
    # guarantees the buffers are flushed and the handles released.
    with open(FLAGS.outputT, 'w') as fileT:
        for key, item in T.items():
            fileT.write('%s=%s\n' % (key, str(item)))
    with open(FLAGS.outputRevD, 'w') as fileR:
        for key, item in revD.items():
            fileR.write('%s=%s\n' % (key, str(item)))
