from __future__ import division
from nltk import *
import globals
import lloyd
import functions

#Using the unclustered representation (i.e. the raw vector representing each chunk) we find the cluster for each author.

"""
process to get the proper cluster vectors.

notation: 
c_i <- cluster i's centroid, C_i <- cluster i
mu_chunk <- synset

1. find c_i by taking the average of all the mu_chunks for that author
2. for each mu_chunk, find its similarity to each c_i to assign it to a particular C_i
3. recompute c_i based on all the mu_chunks that are said to be part of C_i
4. repeat 2-3 until c_i_new = c_i_old for 1<= i <=N

globals.clustered_mu <-- final centroids
"""

def cluster():
    print "Start clustering"
    #step 1
    
    #here we have our initial centroids based on distribution of chunks in unclustered_mu
    #this is that research paper; Julia needs to read about ncut
    
    #should return a dictionary of Synset objects
    globals.orig_chunk_list = globals.chunk_list
    
    #globals.interm_mu = globals.unclustered_mu
    inter_centroids, chunkInAuth = lloyd.lloyd()
    #print "haaai",inter_centroids
    amt_chunks = []
    amt_chunks.append(len(globals.chunk_list))
    print "====================================== ", len(globals.chunk_list)
    amt_iter = 0
    prev_amt_chunks = len(globals.chunk_list)
    while(True):
        
        #display distance between centroids
#        ke = inter_centroids.keys()
#        print "amt of words to consider ", len(inter_centroids.values())
#        for w in globals.Julia_words_encountered:
#            print w, inter_centroids[ke[0]][w] - inter_centroids[ke[1]][w]
        
        #step 2
        #in the hopes of finding new centroids
        #chunks regrouped by their new authors in interm_mu (i.e. to which they are closest)
        functions.regroup_chunks(inter_centroids, chunkInAuth,amt_iter)
        amt_iter+=1
        print "====================================== ", len(globals.chunk_list)
        #step 3
        #recompute centroids given the chunks in interm_mu
        #should return a dictionary of Synset objects
        inter_new_centroids, chunkInAuth  = lloyd.miniLloyd(inter_centroids, chunkInAuth)
        
        #step 4
        #repeat to convergence
        amt_chunks.append(len(globals.chunk_list))
        #if(functions.equals(amt_chunks)):
        if(prev_amt_chunks == len(globals.chunk_list)):
            #once we converge
            #need julia to put all the chunks according to their author in a
            #dict
            globals.clustered_mu = chunkInAuth
            print globals.clustered_mu
            break
        else:    
            inter_centroids = inter_new_centroids   
            prev_amt_chunks = len(globals.chunk_list)