#Matan Keidar 066500992
#Eliahu Khalastchi 040854705

from Corpus import *

import math



class Lidstone:
    '''
    Represents a Lidstone (add-lambda) smoothing method for a unigram model.

    P(w) = (C(w) + lambda) / (S + lambda * V), where C(w) is the corpus
    count of w, S is the corpus size and V is the assumed vocabulary size.
    '''

    def __init__(self, corpus):
        '''
        Constructor.

        corpus -- object exposing S() (corpus size), C(word) (word count),
                  X() (number of distinct seen words) and getEntries()
                  (iterable of seen words).
        '''
        print("unigram Lidstone CTOR")
        self.V = 300000          # assumed vocabulary size
        self.corpus = corpus     # training corpus
        self.lambdaParam = 0     # lambda smoothing parameter
        # optimization: cache the denominator of the Lidstone formula;
        # refreshed whenever lambdaParam or V changes (setLambda / setV)
        self.denominator = self._computeDenominator()

    def _computeDenominator(self):
        '''
        Returns the Lidstone denominator S + lambda * V for the current
        parameters, as a float so that later divisions are never integer
        divisions (the file targets Python 2 semantics).
        '''
        return float(self.corpus.S() + self.lambdaParam * self.V)

    def getLambda(self):
        '''
        Gets the current value of lambdaParam.
        '''
        return self.lambdaParam

    def setLambda(self, newLambda):
        '''
        Sets a new value for lambdaParam and refreshes the cached denominator.
        '''
        self.lambdaParam = newLambda
        self.denominator = self._computeDenominator()

    def setV(self, v):
        '''
        Sets a new vocabulary size and refreshes the cached denominator.
        '''
        self.V = v
        self.denominator = self._computeDenominator()

    def calcProbability(self, word):
        '''
        Calculates the Lidstone-smoothed probability of the given word.
        '''
        return (self.corpus.C(word) + self.lambdaParam) / self.denominator

    def calcFrequency(self, r):
        '''
        Calculates the smoothed frequency r* for a raw count r
        (the smoothed probability scaled back to corpus size).
        '''
        return (r + self.lambdaParam) / self.denominator * self.corpus.S()

    def calcPerplexity(self, fileName):
        '''
        Calculates the perplexity of the model on the given test file.

        The file is assumed to contain at least one whitespace-separated
        word; an empty file raises ZeroDivisionError.
        '''
        logProbSum = 0.0
        wordCount = 0

        # scan each word in the test file and sum its log2-scaled probability
        with open(fileName, "r") as testFile:
            for line in testFile:
                for word in line.strip().split():
                    logProbSum += math.log(self.calcProbability(word), 2)
                    wordCount += 1

        # perplexity = 2 ^ (-(1/N) * sum(log2 P(w)))
        return math.pow(2, -1.0 / wordCount * logProbSum)

    def debug(self):
        '''
        Sanity check: sums the probabilities of all seen words plus the
        mass of the (V - X) unseen words; should be approximately 1.
        '''
        total = 0.0

        # accumulate the probability of each seen word
        for word in self.corpus.getEntries():
            total += self.calcProbability(word)

        # add the probability mass assigned to the unseen words
        total += (self.V - self.corpus.X()) * (self.lambdaParam / self.denominator)

        return total

    def calcBestLambda(self, testFile, outFile):
        '''
        Finds the lambda in {0.01, 0.02, ..., 9.99} minimizing the
        perplexity on testFile.

        Writes one "<lambda>\\t<perplexity>" line per candidate to outFile
        and returns the best lambda found. Note: leaves the model's
        lambdaParam at the last candidate tried, as before.
        '''
        minLambda = 1  # arbitrary default value of Laplace smoothing
        minPerplexity = float("inf")

        with open(outFile, "w") as output:
            # grid-search candidate lambda values with a step of 0.01
            for i in range(1, 1000):
                currLambda = i * 0.01

                self.setLambda(currLambda)  # set current lambda

                # perplexity measurement for this candidate
                currPerplexity = self.calcPerplexity(testFile)

                print("checking lambdaParam=%.2f perplexity=%f (best lambdaParam so far=%s)"
                      % (currLambda, currPerplexity, minLambda))

                if currPerplexity < minPerplexity:  # analyze result
                    minLambda = currLambda
                    minPerplexity = currPerplexity

                # write to output file (same format as the original print >>)
                output.write("%.2f\t%f\n" % (currLambda, currPerplexity))

        return minLambda