# Ofri Keidar Inbal Wiesel 302933981 305331878

import math

'''
Class implements Lidstone smoothing for unigram model
'''
class UniLidstone:
    """Unigram language model with Lidstone (add-lambda) smoothing."""

    def __init__(self, VOCAB_SIZE, uniObsVoc, numUniEventsTrain, lambdaVal, numArticles):
        """
        Create a new Lidstone-smoothed unigram model.

        VOCAB_SIZE        -- total vocabulary size (seen + unseen events)
        uniObsVoc         -- dict mapping each observed word -> its training-set frequency
        numUniEventsTrain -- total number of word tokens in the training set
        lambdaVal         -- Lidstone smoothing parameter (lambda >= 0)
        numArticles       -- frequency of the virtual "begin-article" unigram event
        """
        self.VOCAB_SIZE = VOCAB_SIZE  # vocabulary size
        self.obsVoc = uniObsVoc  # observed vocabulary in train set
        self.numEventsTrain = numUniEventsTrain  # number of words in train file set
        self.lambdaVal = lambdaVal  # value of lambda smoothing parameter
        self.BEGIN_ARTICLE = "begin-article-event"  # virtual event at article's beginning
        self.numArticles = numArticles  # frequency of virtual "begin-article" unigram event

    def getDiscProb(self, inputWord=None):
        """
        Return the Lidstone-discounted probability of inputWord:
            (count(w) + lambda) / (N + lambda * |V|)
        If no word is given (or the word was not observed in training),
        the count is taken as 0, i.e. the probability of an unseen event.
        """
        # unseen words (and the default None) contribute a count of 0
        count = self.obsVoc.get(inputWord, 0)
        numerator = count + self.lambdaVal
        denominator = self.numEventsTrain + self.lambdaVal * self.VOCAB_SIZE
        return numerator / denominator

    def getFreq(self, word):
        """
        Return the number of instances of word in the training set
        (0 if unseen).  The virtual begin-article event maps to numArticles.
        """
        if word == self.BEGIN_ARTICLE:
            return self.numArticles
        return self.obsVoc.get(word, 0)

    def perplexity(self, testFileName, headerPrefix, wordDelim):
        """
        Return the model's perplexity, 2**(-avg log2 prob), over the words
        of the given test file.  Lines that are empty or start with
        headerPrefix (after stripping) are skipped; every other line is
        split on wordDelim.

        Returns float("inf") when some word has zero probability
        (lambda == 0 and the word is unseen) or when the file contains no
        countable words (perplexity is undefined for an empty sequence).
        """
        numWords = 0
        sumLogProb = 0.0

        # 'with' guarantees the file is closed even on early return or error
        with open(testFileName, "r") as testFile:
            for line in testFile:
                stripped = line.strip()

                # skip header or empty line
                if stripped.startswith(headerPrefix) or stripped == "":
                    continue

                for word in stripped.split(wordDelim):
                    prob = self.getDiscProb(word)
                    if prob == 0:  # log2(0) is undefined -> infinite perplexity
                        return float("inf")
                    sumLogProb += math.log(prob, 2)
                    numWords += 1

        # no countable words: report infinite perplexity instead of dividing by zero
        if numWords == 0:
            return float("inf")

        # normalize sum of log probabilities and exponentiate
        return math.pow(2.0, -(sumLogProb / numWords))

    def debug(self):
        """
        Print the total probability mass assigned to all events
        (seen + unseen); a correct model should print 1.0.
        """
        # number of vocabulary events never observed in training
        numUnseen = self.VOCAB_SIZE - len(self.obsVoc)

        # each unseen event gets the same discounted probability
        unseenProbSum = numUnseen * self.getDiscProb()

        # sum the probabilities of all observed events
        obsEventsProbSum = sum(self.getDiscProb(word) for word in self.obsVoc)

        print(unseenProbSum + obsEventsProbSum)