#
# 6.863 -- Final Project
#
# Author: Andreea Bodnari
# Contact: andreeab@mit.edu
# 5/9/11
#
#

from nltk.probability import ConditionalFreqDist
from nltk.corpus import gutenberg
from nltk.corpus import brown
from nltk.corpus import treebank
import sys
import random
import math

class NgramTrain():
    '''
    Trains an order-n n-gram language model over an NLTK corpus and
    generates random / comparable sentences from it.

    Relies on the NLTK 2.x ConditionalFreqDist API (inc(), samples());
    newer NLTK spells these cfdist[context][token] += 1 and list(fdist).
    '''

    # supported corpus names
    GUTENBERG = "gutenberg"
    BROWN = "brown"
    TREEBANK = "treebank"

    # recognised smoother identifiers (stored, not interpreted here)
    smootherValues = ['uniform', 'add1', 'backoff_add1', 'backoff_wb']

    # tokens treated as punctuation when picking seed words
    # NOTE(review): "[\\]^" looks like a mangled character class
    # ("[", "\\", "]", "^") pasted as one string -- confirm intent
    punctuation = ["'", "!", "\\", "#", "$", "%", "&", "(", ")",
                   "*", "+", ",", "-", ".", "/", ":", ";", "<", "=",
                   ">", "?", "@", "[\\]^", "`", "{", "|", "}", "~"]

    def __init__(self, ngram, smoothing, corpusName):
        '''
        Build an order-`ngram` model (plus all lower-order backoff models)
        from the named NLTK corpus.

        @param ngram: the n-gram order (e.g. 3 for trigrams)
        @param smoothing: one of smootherValues
        @param corpusName: GUTENBERG, BROWN or TREEBANK
        '''
        self.ngramOrder = ngram
        self.smoothing = smoothing
        self.corpusName = corpusName

        if corpusName == self.GUTENBERG:
            corpus = gutenberg.words()
        elif corpusName == self.BROWN:
            corpus = brown.words()
        elif corpusName == self.TREEBANK:
            corpus = treebank.words()
        else:
            print("ERROR: Incorrect corpus")
            sys.exit(1)  # was exit(0): report the failure to the shell

        # keep the seed words for generating comparable sentences
        self.seedWords = []

        # truncate the output files for this run
        self.outputFile = "../experiments/ComparableSentences.txt"
        open(self.outputFile, 'w').close()

        self.probabilitiesFile = "../experiments/ComparableProbabilities.txt"
        open(self.probabilitiesFile, 'w').close()

        # generate lower order ngrams for backoff
        self.generateLowerNgrams()

        self.corpusWords = corpus
        # one-shot generator; consumed by generatorInit() below
        self.trainingCorpus = self.word_by_word(corpus)

        self.generatorInit()

    @staticmethod
    def wordGenerationMarkovModel(hModel, vModel, hWordsList, vWordsList):
        '''
        Word generation for the Markov model: blend the candidate
        distributions of the horizontal and vertical models and sample a
        word from the normalized product of the two.

        @param hModel, vModel: the horizontal and vertical model
        @param hWordsList, vWordsList: context words for each model; a
            None entry marks a missing word and triggers lower-order backoff
        @return: (generated word, its blended probability)
        '''

        def _candidates(model, wordsList):
            # One missing word -> drop it and consult the next-lower-order
            # model; full context -> consult the model itself.  (The
            # original consulted the lower-order model with the FULL
            # context for hModel, which could never match and always came
            # back empty, and read hModel's backoff table for vModel.)
            if None in wordsList:
                del wordsList[wordsList.index(None)]
                context = ' '.join(wordsList)
                dist = model.lowerOrderModel.get(model.ngramOrder - 1).cfdist[context]
            else:
                context = ' '.join(wordsList)
                dist = model.cfdist[context]
            if len(dist) == 0:
                # unseen context: drop the oldest word and back off
                del wordsList[0]
                context = ' '.join(wordsList)
                dist = model.lowerOrderModel.get(model.ngramOrder - 1).cfdist[context]
            return dist

        hPossibleWord = _candidates(hModel, hWordsList)
        vPossibleWord = _candidates(vModel, vWordsList)

        blendedProbabilities = {}
        for el in hPossibleWord.samples() + vPossibleWord.samples():
            prob1 = hPossibleWord.freq(el)
            prob2 = vPossibleWord.freq(el)
            # floor zero frequencies so one model cannot veto the other
            if prob1 == 0:
                prob1 = math.pow(10, -5)
            if prob2 == 0:
                prob2 = math.pow(10, -5)
            blendedProbabilities[el] = prob1 * prob2

        # normalize the values of the blended probabilities
        totalSum = sum(blendedProbabilities.values())
        if totalSum != 0:
            for key in blendedProbabilities:
                blendedProbabilities[key] = blendedProbabilities[key] / totalSum

        if not blendedProbabilities:
            # neither model proposed anything: end the sentence with a
            # tiny floor probability (original risked an unbound `token`)
            return (".", math.pow(10, -5))

        # choose the next token, biased by its blended probability
        randValue = random.random()
        curTotal = 0.0
        for token in sorted(blendedProbabilities):
            curTotal += blendedProbabilities[token]
            if curTotal > randValue:
                break

        return (token, blendedProbabilities[token])

    def generateLowerNgrams(self):
        '''
        Generate lower order ngrams that our initial model can fall back
        to: one NgramTrain per order in 1..ngramOrder-1, keyed by order.
        '''
        self.lowerOrderModel = {}
        for order in range(1, self.ngramOrder):
            self.lowerOrderModel[order] = NgramTrain(order, self.smoothing, self.corpusName)

    def word_by_word(self, generator):
        '''
        Flatten one level of nesting: yield the words of each list item
        and every non-list item unchanged.
        '''
        for item in generator:
            if isinstance(item, list):
                for word in item:
                    yield word
            else:
                yield item

    def generatorInit(self):
        '''
        Populate self.cfdist with (context -> next word) counts from the
        training corpus, sliding a window of ngramOrder words that resets
        at each sentence boundary ('.').
        '''
        self.cfdist = ConditionalFreqDist()

        # sliding window, padded with sentence-start markers
        queue = ["<start>" for i in range(self.ngramOrder - 1)]

        for word in self.trainingCorpus:
            # ignore special corpus markers
            if word.startswith('*') or word == '0':
                continue
            queue.append(word)
            if len(queue) == self.ngramOrder:
                context = ' '.join(queue[0:-1])
                token = queue[-1]
                # NLTK 2.x counting API
                self.cfdist[context].inc(token)

                if str(token) == '.':  # end of sentence: reset the window
                    queue = ["<start>" for i in range(self.ngramOrder - 1)]
                else:
                    queue.pop(0)

    def gridSeed(self):
        '''
        Generate a random non-punctuation word from the sentence-start
        distribution.  The word is used for populating the first cell of
        a grid and is also remembered in self.seedWords.

        @return: a random word in the training corpus
        @raise ValueError: if the start context has no observations
        '''
        context = ' '.join(["<start>" for i in range(self.ngramOrder - 1)])
        samples = sorted(self.cfdist[context].samples())
        if not samples:
            # the original died with an unbound-name error here
            raise ValueError("no training data for the start context")

        while 1:
            # choose the next token, biased by conditional frequency
            randValue = random.random()
            curTotal = 0.0
            for token in samples:
                curTotal += self.cfdist[context].freq(token)
                if curTotal > randValue:
                    break

            # accept only tokens containing no punctuation characters
            if not any(el in token for el in self.punctuation):
                break

        self.seedWords.append(token)
        return token

    def wordSequenceGeneration(self, sequenceLength):
        '''
        Generate a word sequence based on a linear n-gram model.

        @param sequenceLength: the length of the generated sequence;
            0 (or a negative value) yields an empty list (the original
            looped forever in that case)
        @return: a list containing the sequence words
        '''
        contextQueue = ["<start>" for i in range(self.ngramOrder - 1)]
        sequenceElements = []

        for _ in range(sequenceLength):
            # choose the next token, biased by conditional frequency
            context = ' '.join(contextQueue)
            randValue = random.random()
            curTotal = 0.0
            for token in sorted(self.cfdist[context].samples()):
                curTotal += self.cfdist[context].freq(token)
                if curTotal > randValue:
                    break

            sequenceElements.append(token)
            contextQueue.append(token)
            contextQueue.pop(0)

        return sequenceElements

    def generateComparableSentences(self, seedContext=None):
        '''
        Generate one random sentence per stored seed word, biased by the
        conditional frequencies, appending each sentence and its
        probability to the output files.
        Credit to RR1, MIT 6.863 and Andreea Bodnari.

        @param seedContext: internal use -- a partial context to resume
            from when a higher-order model backs off to this one.  (The
            original passed this argument recursively but the method did
            not accept it, raising a TypeError.)
        '''
        fileOutput = open(self.outputFile, 'a')
        probabilitiesFile = open(self.probabilitiesFile, 'a')

        if seedContext is not None:
            contexts = [list(seedContext)]
        else:
            contexts = []
            for seed in self.seedWords:
                queue = ["<start>" for i in range(self.ngramOrder - 2)]
                queue.append(seed)
                contexts.append(queue)

        for contextQueue in contexts:
            newSentence = contextQueue[-1] + ' '
            sentenceProbability = 1.

            while 1:
                # choose the next token, biased by conditional frequency
                context = ' '.join(contextQueue)
                samples = sorted(self.cfdist[context].samples())

                # if there is no continuation for the given context,
                # back off to a lower order ngram with a shortened context
                if samples == []:
                    lowerOrderNgram = self.lowerOrderModel.get(self.ngramOrder - 1)
                    contextQueue.pop(0)  # drop the OLDEST word (was pop())
                    fileOutput.close()
                    probabilitiesFile.close()
                    return lowerOrderNgram.generateComparableSentences(contextQueue)

                randValue = random.random()
                curTotal = 0.0
                for token in samples:
                    curTotal += self.cfdist[context].freq(token)
                    if curTotal > randValue:
                        break

                sentenceProbability = sentenceProbability * self.cfdist[context].freq(token)

                contextQueue.append(token)
                contextQueue.pop(0)
                newSentence += token + ' '

                # sentence terminators end the sentence
                if str(token) in ('.', '?', '!'):
                    newSentence += "\n"
                    break

            probabilitiesFile.write(str(sentenceProbability))
            probabilitiesFile.write("\n")
            fileOutput.write(newSentence)

        fileOutput.close()
        probabilitiesFile.close()

    def wordGeneration(self, listOfWords):
        '''
        Generate a random word based on the n-gram probability model.

        @param listOfWords: the context words; a None entry stands for a
            sentence start and is replaced by the '<start>' marker
        @return: the generated word
        '''
        if None in listOfWords:
            listOfWords[listOfWords.index(None)] = '<start>'

        context = ' '.join(listOfWords)
        samples = sorted(self.cfdist[context].samples())

        # if there is no return for the given context
        # return a value based on a lower order ngram
        if not samples:
            lowerOrderNgram = self.lowerOrderModel.get(self.ngramOrder - 1)
            listOfWords.pop(0)
            return lowerOrderNgram.wordGeneration(listOfWords)

        # NOTE(review): the original rejected randValue < 0.05, biasing
        # selection away from the lowest-sorted tokens; preserved -- confirm
        randValue = 0.
        while randValue < 0.05:
            randValue = random.random()

        # choose the next token, biased by conditional frequency
        curTotal = 0.0
        for token in samples:
            curTotal += self.cfdist[context].freq(token)
            if curTotal > randValue:
                break

        return token

    def sequenceProbability(self, aListOfWords):
        '''
        Get the likelihood of the given word sequence under the model.

        @param aListOfWords: the sequence words, possibly left-padded with
            None entries when fewer than ngramOrder words are available
        @return: the sequence probability, as a string
        '''
        listOfWords = aListOfWords

        if listOfWords == []:
            # empty sequence: tiny non-zero floor probability
            return str(math.pow(10, -10))

        lastWord = listOfWords[-1]

        # for the first word in the grid, the sequence probability is
        # word frequency / corpus size
        if listOfWords.count(None) == self.ngramOrder - 1:
            # float() fixes the former Python 2 integer division that
            # always returned 0; str() matches the other return paths
            return str(float(self.corpusWords.count(lastWord)) / len(self.corpusWords))

        if listOfWords[0] is None:
            # shorten the context and delegate to the lower-order model
            lowerOrderNgram = self.lowerOrderModel.get(self.ngramOrder - 1)
            return lowerOrderNgram.sequenceProbability(listOfWords[1:])

        context = ' '.join(listOfWords[:-1])
        totProbability = 0

        if lastWord in self.cfdist[context].samples():
            totProbability = self.cfdist[context].freq(lastWord)

        if totProbability == 0.0:
            # unseen ngram: back off with a shortened context
            listOfWords.pop(0)
            totProbability = float(self.lowerOrderModel[self.ngramOrder - 1].sequenceProbability(listOfWords))

        return str(round(totProbability, 4))