
import char_inputs
import math
import copy
DEBUG=False

def vector(word, weights):
    """The vector is a list of length len(greeklish_characters). 
    For each occurence of a greeklish character in the word, the relevant list member is incremented by one.  """
    global DEGUG
    v = []
    for i in range(0,len(char_inputs.greeklish_ch)):
        v.append(0)
    
    for c in word:
        try:
            idx = char_inputs.greeklish_ch.index(c)
            weight = weights.get(c,1)
            v[idx]+=weight
        except ValueError:
            print 'char',c,'not found in greeklish_ch'
    
    return tuple(v)

def angle(vector1, vector2):
    """Return the cosine of the angle between two equal-length vectors.

    1.0 means an absolute match (same direction), 0.0 means no match
    (orthogonal vectors).

    Raises:
        Exception: if either vector has zero magnitude.
        AssertionError: if the vectors differ in length.
    """
    #We have N-dimensioned vectors
    assert len(vector1)==len(vector2), 'The two vectors have different length'
    dot = 0
    v1_size = 0
    v2_size = 0
    for a, b in zip(vector1, vector2):
        dot += a * b
        v1_size += a * a
        v2_size += b * b

    if v1_size == 0 or v2_size == 0:
        # Parenthesized raise form: valid in both Python 2 and 3
        # (the old `raise Exception, msg` is Python-2-only syntax).
        raise Exception("You can't pass a vector full of zeroes!")

    cos_angle = float(dot) / (math.sqrt(v1_size) * math.sqrt(v2_size))
    return cos_angle

def eucl_distance(vector1,vector2):
    """Return the Euclidean distance between two N-dimensional vectors."""
    #both vectors must have the same dimensionality
    assert len(vector1)==len(vector2), 'The two vectors have different length'
    squared_sum = sum((a - b) ** 2 for a, b in zip(vector1, vector2))
    return math.sqrt(squared_sum)


def argmin(data, function, extra_data=None):
    """Return the index of the element of *data* that minimizes *function*.

    *function* is called as ``function(d)`` or, when *extra_data* is not
    None, as ``function(d, extra_data)``.  Ties go to the earliest index.

    Raises IndexError if *data* is empty.
    """
    # Compare against None explicitly: the previous truthiness test
    # silently dropped extra_data when it was a falsy-but-valid value,
    # e.g. 0 or an all-zero vector (kmeans passes raw data vectors here).
    if extra_data is not None:
        scores = [function(d, extra_data) for d in data]
    else:
        scores = [function(d) for d in data]

    best_idx = 0
    best_score = scores[0]
    for i, score in enumerate(scores):
        if score < best_score:
            best_score = score
            best_idx = i
    return best_idx


def argmax(data, function, extra_data=None):
    """Return the index of the element of *data* that maximizes *function*.

    *function* is called as ``function(d)`` or, when *extra_data* is not
    None, as ``function(d, extra_data)``.  Ties go to the earliest index.

    Raises IndexError if *data* is empty.
    """
    # Compare against None explicitly: the previous truthiness test
    # silently dropped extra_data when it was a falsy-but-valid value,
    # e.g. 0 or an all-zero vector (kmeans passes centroids here).
    if extra_data is not None:
        scores = [function(d, extra_data) for d in data]
    else:
        scores = [function(d) for d in data]

    best_idx = 0
    best_score = scores[0]
    for i, score in enumerate(scores):
        if score > best_score:
            best_score = score
            best_idx = i
    return best_idx

def inv_angle(x1, x2):
    """Cosine distance between two vectors: ``1 - angle(x1, x2)``.

    0.0 for vectors pointing in the same direction; larger values for
    more dissimilar vectors.  (A def instead of an assigned lambda so the
    callable has a proper name in tracebacks; PEP 8 discourages
    ``name = lambda ...``.)
    """
    return 1 - angle(x1, x2)

import random

def kmeans(data, distance=inv_angle,number_of_clusters=4, global_optimizations=5):
    """implementation of the k-means algorithm. 
    Expects a list of tuples that are the data vectors (they must be
    hashable, as they are used as dict keys), a distance function called as
    distance(point, centroid), the number of clusters, and the number of
    "global optimization" restarts to perform.
    It returns the centroids of the clusters: with a non-zero
    global_optimizations it returns the centroids of the best restart (the
    one with the smallest difference between the largest and the smallest
    cluster size); otherwise the centroids of the single run."""
    global DEBUG
    centroids=[]
    #clusters[i] is the list of points currently assigned to centroid i
    clusters=[]
    #cache: data point -> index of the cluster it currently belongs to
    clusters_data = {}
    #choose the first data as the initial centroids
    for idx_cluster in range(0,number_of_clusters):
        centroids.append(data[idx_cluster])
        clusters.append([])
    
    len_of_data_vector = len(data[0])

    #we iterate until there are no movements
    global_opt_count = 0
    #one snapshot per converged run: {'centroids': [...], 'clusters_len': [...]}
    global_opts_data = []
    changed = True
    zero_cluster=False
    
    while(changed and global_opt_count < global_optimizations):
        movements = 0
        changed=False
        if DEBUG: print 'Centroids are:',centroids
        #progress indicator: one '#' per assignment pass
        print '#',
        for d in data:
            #to which centroid is this nearest ?
            nearest_centroid = argmin(centroids,distance,d)
            if DEBUG: print d,'is nearest to centroid',nearest_centroid
            #let's move it to the relevant cluster
            #but first we need to check if it's already there
            try:
                if not nearest_centroid==clusters_data[d]:
                    # remove the data from the current cluster list
                    clusters[clusters_data[d]].remove(d)
                    # put the data in the new cluster list, and update the cache
                    clusters[nearest_centroid].append(d)
                    clusters_data[d] = nearest_centroid
                    changed=True
                    movements+=1
            except KeyError:
                #this will be raised in the first iteration only
                #it is used to initialize the data structures
                clusters_data[d] = nearest_centroid
                clusters[nearest_centroid].append(d)        
                changed=True
                movements+=1
        if DEBUG: print 'After calculation, we have:',clusters_data
        if DEBUG: print '\n',movements,'movements'
        if changed:                
        #lets_recalculate the centroids
            if DEBUG:print 'clusters are',clusters
            if DEBUG:print '-',
            for idx_cluster in range(0, len(clusters)):
                if DEBUG:print '.',idx_cluster,
                if DEBUG:print 'calculating centroid for cluster',clusters[idx_cluster]
                if len(clusters[idx_cluster])==0: 
                    #an empty cluster gets re-seeded with a random data point.
                    #NOTE(review): the index is drawn from the first
                    #number_of_clusters-1 data points only -- this looks like
                    #it was meant to be len(data)-1; confirm before changing.
                    print 'GOT ZERO LENGTH CLUSTER';
#                    del centroids[idx_cluster]
                    centroids[idx_cluster] = data[int(random.random()*(number_of_clusters-1))]
#                    zero_cluster = True
                    continue
                #initialize the new centroid
                new_centroid = []
                for j in range(0, len_of_data_vector):
                    new_centroid.append(0.0)
                #calculate the new average
                for d in clusters[idx_cluster]:
                    #d is a list of values
                    for j in range(0,len_of_data_vector):
                        new_centroid[j]+=d[j]
                #normalize the new centroid
                for j in range(0, len_of_data_vector):
                    new_centroid[j]/=len(clusters[idx_cluster])
                #finally, put it in the list
                centroids[idx_cluster]=new_centroid
        if (not changed) or zero_cluster:
            #the run converged: snapshot it, then restart ("global
            #optimization") with perturbed centroids to try to escape a
            #local optimum
            if DEBUG: print 'Global optimization'
            global_opt_count+=1
            changed=True

            if not zero_cluster:
                global_opts_data.append({'centroids':copy.copy(centroids), 
                                     'clusters_len':[len(cl) for cl in clusters]})
                #seed the restart with the most distant member of each
                #cluster, jittered by a random offset per coordinate
                new_centroids = []
                for idx,cent in enumerate(centroids):
                    new_centroids.append(clusters[idx][argmax(clusters[idx],distance,cent)])
                new_centroids=[[(c-(c * random.random()))+random.random() for c in cent] for cent in new_centroids]
            else:
                #after an empty-cluster event, restart from random data
                #points.  NOTE(review): currently unreachable -- the only
                #assignment of zero_cluster = True above is commented out.
                zero_cluster = False
                new_centroids = []
                for idx_cluster in range(0,number_of_clusters):
                    new_centroids.append(data[int(random.random()*(number_of_clusters-1))])

            #reset all assignment state for the fresh run
            centroids = new_centroids
            clusters=[]
            clusters_data = {}
            for idx_cluster in range(0,number_of_clusters):
                clusters.append([])
            
                            
    if global_optimizations:
        min_avg = []        
        #the best run is the one where the difference between 
        #the smallest and the largest cluster is the least
        #NOTE(review): this loop rebinds `data`, shadowing the parameter --
        #harmless because we return immediately after, but worth renaming.
        for data in global_opts_data:
            if DEBUG: print data
            min_avg.append(max(data['clusters_len'])-min(data['clusters_len']))
        if DEBUG: print min_avg
        return global_opts_data[min_avg.index(min(min_avg))]['centroids']

    #no global optimizations requested: return the converged centroids
    return centroids      

import re
datadir = 'data/'
def classify_words(word_list=datadir+'word_list3.txt',    
                   distance = inv_angle, number_of_clusters=3, 
                   cluster_format=datadir+'cl%s.txt',
                   char_weights=char_inputs.weights, extra_weight=0, word_dict={}, global_opts=20):

    grkl_file = open(word_list, 'rb')
    files = []
    for i in range(0,number_of_clusters):
        files.append(open(cluster_format%str(i), 'wb'))
    contents = grkl_file.read()
    if contents.startswith('\xef\xbb\xbf'):
        contents = contents[3:] #strip BOM       
    contents = unicode(contents, 'utf-8')
    pattern = re.compile(r'(\w+) - (\w+) - (\d+) - ', re.UNICODE)
    data = []
    for line in contents.splitlines():
        m = pattern.match(line)
        if m == None: continue
        grkl_word = m.group(1)
        data.append(vector(grkl_word.lower(),char_weights))
    
    centroids = kmeans(data,distance,number_of_clusters, global_opts)
    print centroids
    
    for line in contents.splitlines():
        m = pattern.match(line)
        if m == None: continue
        grkl_word = m.group(1)
        greek_word = m.group(2)
        count = word_dict.get(grkl_word, 1)
        
        vec = vector(grkl_word.lower(),char_weights)
        type = argmin(centroids,distance,vec)
        files[type].write(grkl_word)
        files[type].write(' - ')
        files[type].write(greek_word.encode('utf-8'))
        files[type].write(' - ')
        if extra_weight:
            c = int(count)*int(extra_weight)
            files[type].write(str(c))                
        else:
            files[type].write(str(count))            
        files[type].write(' - ')                
        files[type].write('\r\n')  
        
        if extra_weight:
            for t, f in enumerate(files):
                if t==type: continue
                f.write(grkl_word)
                f.write(' - ')
                f.write(greek_word.encode('utf-8'))
                f.write(' - ')
                f.write(str(count))                
                f.write(' - ')                
                f.write('\r\n')  
        
    return centroids

def test_angle():
    v1=[1,0,0]
    v2=[1,0,0]
    assert angle(v1,v2)==1
    v1,v2=[0,1,0],[1,0,0]
    assert angle(v1,v2)==0
    v1,v2=[1,1,0],[1,0,0]
    print angle(v1,v2)
    
def test_vector():
    print vector('orestis',{})
    print vector('john',{})
    print vector('jespao',{})
    print vector('gamiesai',{})   
    
def test():
    print angle(vector('galinio',{}),vector('pareksigimeno',{})) 
    print angle(vector('axies',{}),vector('diprosopos',{})) 
    print angle(vector('krufo',{}),vector('matia',{})) 
    print angle(vector('katargisi',{}),vector('sigklonistikame',{})) 
    
def test_argmin():
    print '### START TEST ARGMIN ###'
    data = [(1,1),
            (2,1),
            (4,3),
            (5,4),
            ]
    func = lambda x1,x2:math.sqrt((x1[0]-x2[0])**2+(x1[1]-x2[1])**2)
    print func(data[0],data[0])
    print func(data[0],data[1])
    print func(data[0],data[2])    
    print func(data[0],data[3])  
    print argmin(data, func, data[0])  
    print func(data[1],data[0])
    print func(data[1],data[1])
    print func(data[1],data[2])    
    print func(data[1],data[3])  
    print argmin(data, func, data[1])  
    print '### END TEST ARGMIN ###'    
    
def test_kmeans():
    data = [(1,1),
            (2,1),
            (2,2),
            (4,4),
            (4,3),
            (5,4),
            (10,1),
            (1,10),
            (4,7),
            ]
    
    print kmeans(data, eucl_distance, 2, 20)
    
if __name__ == '__main__':
    # Uncomment the lines below to enable debug output / run the other
    # sanity checks individually.
#    DEBUG = True
#    test_angle()
#    test_vector()
#    test()
#    test_argmin()
    test_kmeans()  
    # Cluster the word list into 60 groups by euclidean distance, writing
    # data/test_cl<N>.txt files with a 10x weight boost for the home cluster.
    classify_words(word_list=datadir+'word_list3.txt',    
                   distance = eucl_distance, number_of_clusters=60, 
                   cluster_format=datadir+'test_cl%s.txt',
                   char_weights=char_inputs.weights, extra_weight=10, word_dict={}, global_opts=20)  