# an attempt at Lloyd's Algorithm
# for now, input points as an array of arrays
from __future__ import division
import random
import math
import globals

'''
NOTE: globals.Julia_words_encountered must be changed to the corresponding
global variable; a find & replace should suffice.
'''
# takes the array of ChunkData objects in globals.chunk_list and groups them
# into k clusters, computing the centroid of each cluster
def lloyd():
    """Run Lloyd's algorithm (k-means) over globals.chunk_list.

    Clusters the chunks into k = len(globals.authors) - 1 groups, starting
    from a random balanced partition, and iterates until no centroid
    component changes.

    Returns:
        (cToCentroid, cToMembers): dicts keyed by cluster code (str "0".."k-1"),
        mapping to the centroid word-vector and the member ChunkData list.
    """
    k = len(globals.authors) - 1
    dim = len(globals.Julia_words_encountered)
    random.seed()
    # cluster code -> member ChunkData objects, and cluster code -> centroid
    cToMembers = {}
    cToCentroid = {}
    for i in range(k):
        cname = str(i)
        globals.Julia_authors.add(cname)
        cToMembers[cname] = []
        # centroid starts as the zero vector over every known word
        cToCentroid[cname] = dict.fromkeys(globals.Julia_words_encountered, 0)

    # initialize by randomly partitioning the chunks across the clusters.
    # Use floor-size clumps and spread the remainder one-per-cluster so no
    # cluster starts empty whenever there are at least k chunks (the old
    # round(n/k) clump size could leave the last cluster with 0 points,
    # which divides by zero when its centroid is averaged).
    random.shuffle(globals.chunk_list)
    base, rem = divmod(len(globals.chunk_list), k)
    start = 0
    for i in range(k):
        size = base + (1 if i < rem else 0)
        cToMembers[str(i)].extend(globals.chunk_list[start:start + size])
        start += size

    done = False
    count = 0
    while not done:
        count += 1
        cToMembers, cToCentroid, done = iterate(cToMembers, cToCentroid, dim, k)
    print('converged in ' + str(count) + ' iterations')
    return cToCentroid, cToMembers

def miniLloyd(cToCentroid, cToMembers):
    # Re-run the Lloyd iteration loop on an existing clustering until the
    # centroids stop moving, then hand back the updated dicts.
    num_clusters = len(globals.authors) - 1
    num_dims = len(globals.Julia_words_encountered)
    iterations = 0
    while True:
        iterations += 1
        cToMembers, cToCentroid, converged = iterate(
            cToMembers, cToCentroid, num_dims, num_clusters)
        if converged:
            break
    print('converged in ' + str(iterations) + ' iterations')
    return cToCentroid, cToMembers


# this method takes in dicts of the cluster code to members and centroids
# (the chunks themselves are read from globals.chunk_list);
# the rebuilt member dict, the updated centroids, and a convergence flag
# are returned to the caller
# 1. compute centroid of each cluster
# 2. reassign points to whichever centroid is closer
def iterate(cMembers, cCenters, dim, k):
    """Perform one k-means step.

    Args:
        cMembers: dict mapping cluster code (str) -> list of ChunkData.
        cCenters: dict mapping cluster code (str) -> centroid word-vector
            (dict word -> value) from the previous pass.
        dim: dimensionality; unused here, kept for interface compatibility.
        k: number of clusters.

    Returns:
        (cMembers, cCenters, has_converged): the freshly rebuilt membership
        dict, the updated centroids, and True when no centroid component
        changed this pass.
    """
    is_done = False

    '''
    1. COMPUTE CENTROID OF EACH CLUSTER
    '''
    has_converged = True
    for cluster in cMembers.keys():
        oldCentroid = cCenters[cluster]
        numPoints = len(cMembers[cluster])
        # an empty cluster has no mean; keep its previous centroid rather
        # than dividing by zero
        if numPoints == 0:
            continue
        # accumulate component-wise totals over every known word,
        # then average
        centroid = dict.fromkeys(globals.Julia_words_encountered, 0)
        for chunk in cMembers[cluster]:
            wordVector = chunk.synsets
            for word in globals.Julia_words_encountered:
                # chunks that never saw a word contribute 0 for it
                centroid[word] += wordVector.get(word, 0)

        # now compute averages (true division via __future__ import)
        for w in globals.Julia_words_encountered:
            centroid[w] /= numPoints
            if centroid[w] != oldCentroid[w]:
                has_converged = False
        # shove this new centroid into the dict of centroids
        cCenters[cluster] = centroid

    '''
    2. reassign each point to the closest centroid (Euclidean distance)
    '''
    # rebuild the membership dict from scratch
    cMembers = {}
    for i in range(k):
        cMembers[str(i)] = []

    # assign every chunk to its nearest centroid
    for chunk in globals.chunk_list:
        point = chunk.synsets  # the chunk's actual word vector
        minDistance = float("inf")
        bestCluster = -1
        for c in cCenters.keys():
            ctr = cCenters[c]
            dsum = 0
            for d in globals.Julia_words_encountered:
                diff = ctr[d] - point.get(d, 0)
                dsum += diff * diff
            dist = math.sqrt(dsum)
            if dist < minDistance:
                minDistance = dist
                bestCluster = c
        cMembers[bestCluster].append(chunk)
    return cMembers, cCenters, has_converged


def assign_centroids(cMembers, cCenters):
    """Label each cluster with the author contributing the most chunks.

    Args:
        cMembers: dict cluster code -> list of chunks; each chunk's first
            element carries an OrigAuthor attribute.
        cCenters: dict cluster code -> centroid word-vector.

    Returns:
        dict mapping author name -> centroid. When the same author
        dominates several clusters, later centroids are stored under the
        author's name with a running collision counter appended.
    """
    cToAuthorFreqz = {}
    authorToCount = {}
    clusterCodeToAuthor = {}
    authorToCentroid = {}
    collisions = 0
    for cluster in cMembers.keys():
        cToAuthorFreqz[cluster] = {}
        for chunk in cMembers[cluster]:
            # NOTE(review): chunks are indexed here (chunk[0].OrigAuthor)
            # but used directly (chunk.synsets) in iterate() -- confirm
            # the chunk layout is consistent
            auth = chunk[0].OrigAuthor
            # initialize the per-cluster tally independently of the global
            # count: the old code guarded both with one "first time we see
            # this author" check, which raised KeyError when an author
            # already counted in one cluster showed up in another
            if auth not in authorToCount:
                authorToCount[auth] = 0
            if auth not in cToAuthorFreqz[cluster]:
                cToAuthorFreqz[cluster][auth] = 0
            authorToCount[auth] += 1
            cToAuthorFreqz[cluster][auth] += 1
    # pick the dominant author of each cluster
    for cluster in cToAuthorFreqz.keys():
        maxCount = 0
        bestAuthor = None
        for author in cToAuthorFreqz[cluster].keys():
            count = cToAuthorFreqz[cluster][author]
            if count > maxCount:
                maxCount = count
                bestAuthor = author
        # an empty cluster has no dominant author; leave it unlabeled
        # (bestAuthor would otherwise be unbound)
        if bestAuthor is not None:
            clusterCodeToAuthor[cluster] = bestAuthor
    for cluster in cCenters.keys():
        if cluster not in clusterCodeToAuthor:
            continue  # unlabeled (empty) cluster
        owner = clusterCodeToAuthor[cluster]
        if owner not in authorToCentroid:
            authorToCentroid[owner] = cCenters[cluster]
        # case for if the same author dominates multiple clusters:
        # keep both centroids under a suffixed key instead of overwriting
        else:
            collisions += 1
            owner += str(collisions)
            authorToCentroid[owner] = cCenters[cluster]
    return authorToCentroid
        

