# Matan Keidar 066500992
# Eliahu Khalastchi 040854705

from Utils import *
import random
import math

debug = True            # when True, DEBUG() prints trace messages
NUM_TOPICS = 9          # number of clusters (rebound from the topics file by parseInputs below)
NUM_UNIQUE_WORDS = 0    # number of unique words in documents (set by parseInputs)
NUM_DOCS = 0            # number of documents in training set (set by parseInputs)
K = 300                 # cutoff parameter of the underflow (scaled log-sum-exp) trick
V = 30000               # dictionary size default (replaced by NUM_UNIQUE_WORDS after parsing)
EPSILON = 0.0001        # lower bound for alpha_i values (keeps log(alpha_i) finite)
LAMBDA = 0.14           # lambda parameter of Lidstone smoothing
TOLERANCE = 1.0         # convergence threshold on the likelihood change (stop condition)


def DEBUG(x):
    '''
    Debug printout
    '''
    if (debug):
        print "DEBUG", x 


def init():
    '''
    Initialize the model parameters:
    uniform alpha_i values and strictly positive random P(Wk|Xi) values.
    '''
    DEBUG("init")

    # spread the probability mass evenly over the topics; the last topic
    # receives whatever is left so the values sum to exactly 1.0
    share = 1.0 / NUM_TOPICS
    leftover = 1.0
    for topic in range(NUM_TOPICS - 1):
        P_Xi_arr[topic] = share
        leftover -= share
    P_Xi_arr[NUM_TOPICS - 1] = leftover

    # draw a random value for every (word, topic) pair
    for word in wordList:
        P_Wk_given_Xi_arr[word] = {}
        for topic in range(NUM_TOPICS):
            r = 0.0
            # random() may return exactly 0.0 -- never assign a zero probability
            while r == 0.0:
                r = random.random()
            P_Wk_given_Xi_arr[word][topic] = r

    return

def calc_Zti_matrix():
    '''
    Fill Zti_matrix with z_{t,i} = ln(alpha_i) + sum_k n_{t,k} * ln(P(Wk|Xi))
    for every document t and topic i, and record in maxZi_arr[t] the maximal
    z value of each document (the scaling anchor of the underflow trick).

    Fix: the original accumulated two locals (sumofallmaximium, sumNtkc)
    that were never read anywhere -- dead code removed.
    '''
    # scan each document
    for t in range(NUM_DOCS):

        Zti_matrix[t] = {}  # initialize the values of current Zti

        # scan each topic
        for i in range(NUM_TOPICS):

            alpha_i = P_Xi_arr[i]  # get alpha_i

            # log-likelihood contribution of the words of document t
            sigma = 0.0
            for w in Ntk[t].getEntries():
                sigma += Ntk[t].C(w) * math.log(P_Wk_given_Xi_arr[w][i])

            # set current Zti value
            Zti_matrix[t][i] = math.log(alpha_i) + sigma

        # per-document maximum of the z values, used later for scaling
        maxZi_arr[t] = max(Zti_matrix[t].values())
        
def calc_Wti_Matrix(K):
    '''
    Compute the posterior topic weights
        w_{t,i} = exp(z_{t,i} - m_t) / sum_j exp(z_{t,j} - m_t)
    where m_t = maxZi_arr[t] and terms with z - m < -K are treated as 0
    to avoid underflow.

    K -- cutoff parameter of the underflow optimization.

    Fix: the denominator loop used the condition "> -K" while the numerator
    (and calcLikelihood) used ">= -K"; at exact equality a term entered the
    numerator but not the denominator.  Both now use ">= -K".
    '''
    # scan each document
    for t in range(NUM_DOCS):
        m = maxZi_arr[t]

        # optimization: the denominator is shared by all topics of the
        # document, so it is computed only once
        denominator = 0.0
        for j in range(NUM_TOPICS):
            Zj = Zti_matrix[t][j]
            if Zj - m >= -K:
                denominator += math.exp(Zj - m)

        # scan each topic
        Wti_matrix[t] = {}
        for i in range(NUM_TOPICS):
            Zi = Zti_matrix[t][i]

            # underflow guard: exp of a very negative value is treated as 0
            if Zi - m >= -K:
                Wti_matrix[t][i] = (math.exp(Zi - m)) / denominator
            else:
                Wti_matrix[t][i] = 0

def calcP_Xi_arr():
    '''
    M-step update of the topic priors: alpha_i = (1/N) * sum_t w_{t,i}.

    Values below EPSILON are raised to EPSILON so that log(alpha_i) stays
    finite, and the vector is then re-normalized so the alphas still sum
    to 1 -- clipping alone leaves a total slightly above 1, which the
    original omitted.
    '''
    frac = 1.0 / NUM_DOCS

    total = 0.0

    # scan each topic
    for i in range(NUM_TOPICS):

        # accumulate the probability mass of current topic over all documents
        sigma = 0.0
        for t in range(NUM_DOCS):
            sigma += Wti_matrix[t][i]

        val = frac * sigma

        # underflow guard (clipping): alpha_i must stay strictly positive
        if val < EPSILON:
            val = EPSILON

        P_Xi_arr[i] = val
        total += val

    # re-normalize after clipping so that sum_i alpha_i == 1
    for i in range(NUM_TOPICS):
        P_Xi_arr[i] /= total


def calc_Wk_given_Xi_numerator(Wk,i):
    '''
    Numerator of the smoothed P(Wk|Xi) estimate:
        sum_t w_{t,i} * n_{t,k}  +  LAMBDA
    where n_{t,k} is the count of word Wk in document t.
    '''
    total = 0.0

    for doc in range(NUM_DOCS):
        count = Ntk[doc].C(Wk)

        # documents that do not contain Wk contribute nothing
        if count > 0:
            total += Wti_matrix[doc][i] * count

    # Lidstone smoothing term
    return total + LAMBDA
        
def calc_Wk_given_Xi_denminator(i):
    '''
    Denominator of the smoothed P(Wk|Xi) estimate:
        sum_t w_{t,i} * |document t|  +  V * LAMBDA
    where |document t| is its total word count, Ntk[t].S().
    '''
    total = 0.0

    for doc in range(NUM_DOCS):
        # weight the document length by its posterior topic weight
        total += Wti_matrix[doc][i] * Ntk[doc].S()

    # Lidstone smoothing term over the whole vocabulary
    return total + V*LAMBDA
    
def calcP_Wk_given_Xi_arr():
    '''
    M-step update of all P(Wk|Xi) values (Lidstone-smoothed).

    Fix: the original maintained a counter `k` that was incremented per
    word but never read -- dead code removed.
    '''
    # scan each topic
    for i in range(NUM_TOPICS):

        # the denominator depends only on the topic, so compute it once
        denom = calc_Wk_given_Xi_denminator(i)

        # scan each word
        for Wk in wordList:
            # calculate the numerator and the resulting probability
            numer = calc_Wk_given_Xi_numerator(Wk, i)
            P_Wk_given_Xi_arr[Wk][i] = numer / denom
    
def Estep():
    '''
    E-step of the EM algorithm
    '''
    DEBUG("E-step")
    DEBUG("\tZti-step")
    
    calc_Zti_matrix() # calculates the Zti values of current level 
    
    DEBUG("\tWti-step")
    
    calc_Wti_Matrix(K) # calculates the Wti values of current level

    # scan each P(Xi) value and print it 
    for alpha_i in P_Xi_arr:
        print "\t E: alpha is ",alpha_i


def Mstep():
    '''
    M-step of the EM algorithm
    '''
    DEBUG("M-step")
    DEBUG("\tXi arr")
    
    calcP_Xi_arr()  # calculates new P(Xi) values (new alpha_i)
    
    DEBUG("\tWk given Xi arr")
    
    calcP_Wk_given_Xi_arr() # calculates the P(Wk|Xi) values of current level
    
    # scan each P(Xi) value and print it
    for alpha_i in P_Xi_arr:
        print "\t M: alpha is ",alpha_i
    
def calcLikelihood():
    '''
    Log-likelihood of the corpus under the current model, computed with the
    scaling trick:
        L = sum_t [ m_t + ln( sum_j exp(z_{t,j} - m_t) ) ]
    where terms with z - m < -K are skipped to avoid underflow.
    '''
    likelihood = 0.0

    for doc in range(NUM_DOCS):
        # scaling anchor of this document
        m = maxZi_arr[doc]

        inner = 0.0
        for topic in range(NUM_TOPICS):
            diff = Zti_matrix[doc][topic] - m

            # underflow guard: exp of a very negative value is treated as 0
            if diff >= -K:
                inner += math.exp(diff)

        likelihood += m + math.log(inner)

    return likelihood


def createConfusionMatrix():
    '''
    create the confusion matrix according to current model
    '''
    
    # create an empty confusion matrix
    Cmatrix = zeroMatrix(NUM_TOPICS, NUM_TOPICS)
    

    classification = []
    
    # scan each document
    for t in range(NUM_DOCS):
        M=float(0) # current max
        cluster=0
        
        # scan each topic and find its argmax topic probability  
        for i in range(NUM_TOPICS):
            
            # calculate Pti value
            Pti = Wti_matrix[t][i] / P_Xi_arr[i]
            
            # find max
            if(M < Pti):
                cluster=i
                M=Pti
        
        classification.append(cluster)
        # scan each topic 
        for topic in topics[t]:
            
            Cmatrix[cluster][topic] += 1 # increment the counter in confusion matrix
            
        
    
    
    # print the list of topics
    print list(x for x,y in topicsList)
    
    # print the confusion matrix
    for i in range(NUM_TOPICS):
        print Cmatrix[i]        

    #calculate accuracy:
    
    # map cluster to topics
    cluster2topic=[]
    for cluster in range(NUM_TOPICS):
        topic=Cmatrix[cluster].index(max(Cmatrix[cluster]))
        cluster2topic.append(topic)
    
    # change cluster to topic in classification[t]    
    for t in range(NUM_DOCS):
        cluster = classification[t]
        classification[t] =cluster2topic[cluster]
        
    # calculate accuracy
    accuracyCount = 0
    for t in range(NUM_DOCS):
        correct=False
        for topic in topics[t]: # real classifications
            if classification[t] == topic:
                correct=True 
        if correct:
            accuracyCount +=1
    
    print "correct:" , accuracyCount
    print "out of:" , NUM_DOCS
    print "the accuracy is ", float(accuracyCount)/float(NUM_DOCS) 

topicsList = [] # all topics from topics file (rebound by parseInputs below; elements are pairs -- see the print at the end)
topics=[] # list that contains for each document a list of its topics according to input data

# parse the training corpus and the topic list; this rebinds NUM_TOPICS,
# NUM_DOCS, NUM_UNIQUE_WORDS -- the module-level defaults above are placeholders
(NUM_TOPICS, topicsList), (Ntk, NUM_DOCS, wordList, NUM_UNIQUE_WORDS, topics) = parseInputs("../halfSet.txt", "../topics.txt")
V=NUM_UNIQUE_WORDS # dictionary size used by the Lidstone denominator (replaces the 30000 default)

DEBUG(("Num topics = ", NUM_TOPICS))
DEBUG(("Num Docs = ", NUM_DOCS))
DEBUG(("#Unique words = ", NUM_UNIQUE_WORDS))

 # the prior probability of each topic, P(Xi) (the alpha_i values)
P_Xi_arr = zeroList(NUM_TOPICS)
  
# P(Wk|Xi): maps word -> {topic index -> probability}
P_Wk_given_Xi_arr={} 

# w_{t,i}: maps document index -> {topic index -> posterior topic weight}
Wti_matrix={}

# z_{t,i}: maps document index -> {topic index -> scaled log score}
Zti_matrix={}

# per-document maximum of the z values (scaling anchor of the underflow trick)
maxZi_arr = zeroList(NUM_DOCS)

#initialization of data structures
init()

# likelihood bookkeeping; oldL starts 100 away from currL so the loop body
# runs at least once
currL = 0
oldL = 100

# iterate EM until the likelihood change drops below TOLERANCE
currIter = 1 # NOTE(review): never read or incremented anywhere in this file
while (abs(oldL - currL) > TOLERANCE):    
    Estep()
    Mstep()
    oldL = currL
    currL = calcLikelihood()
    print "after M L=%.20f" % (currL)
    print "**************************************************************************"
DEBUG("DONE")

# print the confusion matrix
createConfusionMatrix()