#!/usr/bin/env python
# -*- coding:utf-8 -*-

from numpy import *
import time
#import matplotlib.pyplot as plt


# compute Euclidean distance
# compute Euclidean distance
def euclidean_distance(vec1, vec2):
    """Return the Euclidean (L2) distance between two vectors.

    Accepts numpy arrays or matrices of the same shape; `asarray`
    flattens the matrix case so the reduction yields a scalar.
    """
    return sqrt(sum(asarray(vec1 - vec2) ** 2))

# init centroids with random samples
def init_centroids(data_set, k):
    """Pick k distinct rows of data_set as the initial centroids.

    data_set: (n, dim) array or matrix of samples; requires k <= n.
    Returns a (k, dim) ndarray.

    Samples WITHOUT replacement: the original drew indices with
    replacement, so two centroids could start identical, which
    guarantees at least one empty cluster.
    """
    samples_num, dim = data_set.shape
    centroids = zeros((k, dim))
    indices = random.choice(samples_num, k, replace=False)
    for i, index in enumerate(indices):
        centroids[i, :] = data_set[index, :]
    return centroids


# k-means cluster
def kmeans(data_set, k):
    """Cluster data_set into k groups with Lloyd's algorithm.

    data_set: (n, dim) array or matrix of samples.
    k: number of clusters.
    Returns (centroids, cluster_assignment):
      centroids          -- (k, dim) ndarray of final cluster centers
      cluster_assignment -- (n, 2) matrix; column 0 is the assigned
                            cluster index, column 1 the squared distance
                            from the sample to its centroid.
    """
    samples_num = data_set.shape[0]
    # first column stores which cluster this sample belongs to,
    # second column stores the error between this sample and its centroid (its value is distance ** 2)
    cluster_assignment = mat(zeros((samples_num, 2)))
    cluster_changed = True

    # step 1: init centroids
    centroids = init_centroids(data_set, k)

    while cluster_changed:
        cluster_changed = False
        # for each sample
        for i in range(samples_num):
            # inf instead of an arbitrary 100000.0 so the search is
            # correct regardless of the data's scale
            min_dist = inf
            min_dist_cluster_index = 0
            # for each centroid
            # step 2: find the centroid who is closest
            for j in range(k):
                distance = euclidean_distance(centroids[j, :], data_set[i, :])
                if distance < min_dist:
                    min_dist = distance
                    min_dist_cluster_index = j

            # step 3: update its cluster
            if cluster_assignment[i, 0] != min_dist_cluster_index:
                cluster_changed = True
            # always refresh the stored squared error: even when the
            # assignment is unchanged the centroid may have moved
            cluster_assignment[i, :] = min_dist_cluster_index, min_dist ** 2

        # step 4: update centroids
        for j in range(k):
            # numpy.matrix.A returns an ndarray (same as matrix.getA()),
            # so the boolean comparison yields row indices via nonzero
            points_in_cluster = data_set[nonzero(cluster_assignment[:, 0].A == j)[0]]
            # guard: mean of an empty cluster is NaN and would poison
            # every later distance computation; keep the old centroid
            if len(points_in_cluster) > 0:
                centroids[j, :] = mean(points_in_cluster, axis=0)

    print('Congratulations, cluster complete!')
    return centroids, cluster_assignment


# show your cluster only available with 2-D data
def show_cluster(data_set, k, centroids, cluster_assignment):
    """Scatter-plot a 2-D clustering result.

    Samples are drawn with a per-cluster marker/color, centroids as
    larger diamond markers. Returns 1 on error (data is not 2-D, or k
    exceeds the available marker styles), otherwise None after showing
    the figure.
    """
    # local import: the module-level matplotlib import is commented out,
    # so without this the function raises NameError on plt
    import matplotlib.pyplot as plt

    samples_num, dim = data_set.shape
    if dim != 2:
        print("Sorry! I can not draw because the dimension of your data is not 2!")
        return 1

    mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
    if k > len(mark):
        print("Sorry! Your k is too large!")
        return 1

    # draw all samples, styled by their assigned cluster
    for i in range(samples_num):
        mark_index = int(cluster_assignment[i, 0])
        plt.plot(data_set[i, 0], data_set[i, 1], mark[mark_index])

    mark = ['Dr', 'Db', 'Dg', 'Dk', '^b', '+b', 'sb', 'db', '<b', 'pb']
    # draw the centroids
    for i in range(k):
        plt.plot(centroids[i, 0], centroids[i, 1], mark[i], markersize=12)

    plt.show()