import copy
import common
import random
import numpy as np


####################################################
#clustering kmeans step by step
#def cluster_points(X, mu):
#    clusters  = {}
#    for x in X:
#        bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) \
#                    for i in enumerate(mu)], key=lambda t:t[1])[0]
#        try:
#            clusters[bestmukey].append(x)
#        except KeyError:
#            clusters[bestmukey] = [x]
#    return clusters
#    
#def reevaluate_centers(mu, clusters):
#    newmu = []
#    keys = sorted(clusters.keys())
#    for k in keys:
#        newmu.append(np.mean(clusters[k], axis = 0))
#    return newmu  
#
#def has_converged(mu, oldmu):
#    return (set([tuple(a) for a in mu]) == set([tuple(a) for a in oldmu]))    
#    
#def find_centers(X, K):
#    # Initialize to K random centers
#    oldmu = random.sample(X, K)
#    mu = random.sample(X, K)
#    while not has_converged(mu, oldmu):
#        oldmu = mu
#        # Assign all points in X to clusters
#        clusters = cluster_points(X, mu)
#        # Reevaluate centers
#        mu = reevaluate_centers(oldmu, clusters)
#    return(mu, clusters)
#    
#clusters =  cluster_points(sklearn_transf, xrange(0, 20))
#
#colour_list = OrderedDict()
#for idx in xrange(0, len(clusters)):
#    for cluster in clusters.keys():
##        print clusters[cluster]
#        for s_cluster in clusters[cluster]:
#            if all(item in s_cluster for item in sklearn_transf[idx]):
#                colour_list[scan_data_binned_frequent.keys()[idx]] = my_colors[cluster]
#                break;

def find_k_most_different_vect(rows, k):
    """Greedily pick k mutually different vectors out of rows.

    Seeds with rows[0]; on each of the remaining k-1 steps picks the row
    whose common.LMsimilarity to the running sum of the picks so far is
    largest (the return value is treated as a distance: larger = more
    different), then folds that row into the running sum.

    Returns a list of k plain Python lists, rows[0] first.
    NOTE: already-picked rows are not excluded from later searches.
    """
    chosen_idx = []                     # indices picked after the seed row
    accum = copy.deepcopy(rows[0])      # running sum of the selected vectors
    picked = [rows[0].tolist()]

    for _ in range(k - 1):
        far_idx = 0
        far_dist = 0
        for cand in range(len(rows)):
            d = common.LMsimilarity(accum, rows[cand])
            if d > far_dist:
                far_dist = d
                far_idx = cand
        chosen_idx.append(far_idx)
        # Accumulate the winner and keep searching relative to the sum.
        accum = accum + rows[far_idx]

    for idx in chosen_idx:
        picked.append(rows[idx].tolist())
    return picked
    
def kcluster(rows, k=3, distance=common.LMsimilarity):# Cited from Programming Collective Intelligence 
    # Determine the minimum and maximum values for each point
    ranges=[(min([row[i] for row in rows]),max([row[i] for row in rows])) for i in range(len(rows[0]))]

    # Create k randomly placed centroids
    clusters=[[random.random( )*(ranges[i][1]-ranges[i][0])+ranges[i][0] for i in range(len(rows[0]))] for j in range(k)]
    # Find K-most different vectors
    clusters = find_k_most_different_vect(rows, k)

    lastmatches=None
    for t in range(5):
        print 'Iteration %d' % t
        bestmatches=[[] for i in range(k)]
        # Find which centroid is the closest for each row
        for j in range(len(rows)):
            row=rows[j]
            bestmatch=0
            for i in range(k):
                d=distance(clusters[i],row)
                if d<distance(clusters[bestmatch],row): 
                    bestmatch=i
            bestmatches[bestmatch].append(j)
        ## If the results are the same as last time, this is complete
        if bestmatches==lastmatches:
            break
        lastmatches=bestmatches

        # Move the centroids to the average of their members
        for i in range(k):
            avgs=[0.0]*len(rows[0])
            if len(bestmatches[i])>0:
                for rowid in bestmatches[i]:
                    for m in range(len(rows[rowid])):
                        avgs[m]+=rows[rowid][m]
                for j in range(len(avgs)):
                    avgs[j]/=len(bestmatches[i])
                clusters[i]=avgs
    return [clusters, bestmatches]          

def global_impact(centers, clusters, X):
    """Total distortion over all clusters.

    For every cluster, sums common.LMsimilarity between its center and
    each member point (cluster members are stored as indices into X),
    then returns the grand total.
    """
    total = 0
    for idx in range(len(clusters)):
        cluster_cost = sum(common.LMsimilarity(centers[idx], X[member])
                           for member in clusters[idx])
        total += cluster_cost
    return total

def weight_factor(attr, k, old_alfa):
    """Weight factor alpha_K for the Pham/Dimov/Nguyen f(K) method.

    attr     -- number of attributes (dimensions) of the data, Nd
    k        -- current number of clusters K
    old_alfa -- alpha for K-1 (ignored on the K == 2, Nd > 1 branch)

    Per the paper: alpha_2 = 1 - 3/(4*Nd) and
    alpha_K = alpha_{K-1} + (1 - alpha_{K-1})/6 for K > 2.

    Fix: the original used integer literals, so under Python 2 the
    divisions truncated -- 3 / (4 * attr) == 0 for any attr > 0, which
    collapsed alpha_2 to 1 (and (1 - 0) / 6 == 0 when old_alfa is the
    int 0).  Float literals force true division on both branches.
    """
    if k == 2 and attr > 1:
        return 1.0 - (3.0 / (4.0 * attr))
    return old_alfa + ((1.0 - old_alfa) / 6.0)
    
def real_distortion(global_imp, old_global_imp, k, alfa):
    """Evaluation function f(K) from the Pham/Dimov/Nguyen method.

    f(K) = S_K / (alpha_K * S_{K-1}); the paper defines f(K) = 1 both
    for K == 1 and when the previous distortion S_{K-1} is zero.

    Fixes: the original condition `k == 1 or k - 1 == 0` tested the same
    thing twice, and the S_{K-1} == 0 case was unguarded (a division by
    a zero denominator).
    """
    if k == 1:
        return 1
    denom = alfa * old_global_imp
    if denom == 0:
        return 1  # per the paper; also avoids dividing by zero
    return global_imp / denom
    
def gap_statistic_my(X):
    """Evaluate candidate cluster counts K = 1..4 via the f(K) statistic.

    Based on https://datasciencelab.wordpress.com/2014/01/21/selection-of-k-in-k-means-clustering-reloaded/

    For each K: run kcluster, record the total distortion, update the
    alpha weight factor, and compute f(K).  Returns the tuple
    (ks, global_imp, fk, alfa).
    """
    ks = range(1, 5)
    n_ks = len(ks)
    global_imp = np.zeros(n_ks)
    fk = np.zeros(n_ks)
    alfa = np.zeros(n_ks)

    for indk, k in enumerate(ks):
        centers, members = kcluster(X, k)
        global_imp[indk] = global_impact(centers, members, X)
        # alpha for the previous K; 0 when there is no previous entry.
        old_alfa = alfa[indk - 1] if indk - 1 >= 0 else 0
        alfa[indk] = weight_factor(len(X[0]), k, old_alfa)
        # At indk == 0 the k == 1 branch of real_distortion ignores the
        # wrapped-around global_imp[-1] lookup.
        fk[indk] = real_distortion(global_imp[indk], global_imp[indk - 1],
                                   k, alfa[indk])

    return (ks, global_imp, fk, alfa)

