#!/usr/bin/env python 
#-*-encoding:utf8-*-


import time

import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster,metrics
from itertools import cycle

def get_src_data():
    """Load 2-D sample points from ./data/testSet2.txt.

    The file is expected to be comma-separated with one header line,
    which is skipped.

    Returns:
        list[list[float]]: one [x, y] pair per data row.
    """
    points = []
    with open('./data/testSet2.txt', 'r') as f:
        # Skip the header line.  The original used f.next(), which is
        # Python 2 only; the next() builtin works on both 2.6+ and 3.
        next(f)
        for line in f:
            a, b = line.strip().split(',')
            points.append([float(a), float(b)])
    return points

if __name__ == '__main__':
    # k-means clustering demo: fit, plot clusters, print evaluation metrics.
    src = get_src_data()
    print(len(src))

    #///define KM model.
    # NOTE(review): 80 clusters is hard-coded — confirm it suits the data.
    KM = cluster.KMeans(n_clusters=80)

    #///create model (fit on the raw points).
    pars = KM.fit(src)

    #///per-sample cluster assignments produced by the fit.
    label = pars.labels_

    #///cluster centroid coordinates, one row per cluster.
    cluster_centers = pars.cluster_centers_

    #///distinct labels actually present; its length is the cluster count.
    labels_unique = np.unique(pars.labels_)

    n_clusters = len(labels_unique)

    #///create figure instance, define figure name.
    plt.figure('k_Means')

    #///colour iterator: the sequence repeats so every cluster index
    #///gets a colour even when n_clusters exceeds the palette size.
    colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')

    #///one pass per cluster: scatter its members, then mark its centre.
    for k, col in zip(range(n_clusters), colors):
        cluster_center = cluster_centers[k]

        # Collect the coordinates of the points assigned to cluster k.
        x_axis = [p[0] for p, lbl in zip(src, label) if lbl == k]
        y_axis = [p[1] for p, lbl in zip(src, label) if lbl == k]

        #///format: plt.plot([x1,x2,x3,x...xn],[y1,y2,y3,...yn],'r.')
        plt.plot(x_axis, y_axis, col + '.')
        plt.plot(cluster_center[0], cluster_center[1], 'o')
        #plt.plot(x_axis,y_axis,col+'.',markerfacecolor='b',markeredgecolor='k',markersize=3)
    plt.title('cluster: %d' % n_clusters)
    #plt.show()
    plt.close()

    #///apply prediction, and evaluate the results.
    # NOTE(review): predict() on the very data the model was fit on
    # reproduces labels_, so the pair-counting / information-theoretic
    # scores below compare the labelling with itself and come out
    # trivially (close to) 1.0 — there is no external ground truth here.
    pred = pars.predict(src)
    print(label)
    print(pred)
    labels_true = label
    labels_pred = pred
    print(metrics.adjusted_rand_score(labels_true, labels_pred))
    print(metrics.adjusted_mutual_info_score(labels_true, labels_pred))
    print(metrics.normalized_mutual_info_score(labels_true, labels_pred))
    print(metrics.homogeneity_completeness_v_measure(labels_true, labels_pred))

    # Silhouette score: for each sample, (b - a) / max(a, b) where a is
    # the mean intra-cluster distance and b the mean distance to the
    # nearest other cluster.  Ranges -1..1; high values mean samples sit
    # well inside their own cluster (Rousseeuw, 1986).  Unlike the
    # metrics above, this one needs no ground truth.
    print(metrics.silhouette_score(src, label, metric='euclidean'))

