r'''
k均值算法是一种聚类算法，通过迭代计算数据k个聚集点。大致步骤如下：

1. 给出k个初始中心点
2. 根据中心点将数据分成k个集合，划分依据使数据点与该中心点距离最近。
3. 计算k个集合的新中心点。

   a) 新中心点与旧中心点不同，则进入步骤2
   b) 如果相同，则完成。


初始中心点在每个属性的取值范围内均匀随机生成：

.. math::
    
    c = x_{min} + r \, (x_{max} - x_{min}), \quad r \sim U(0, 1)

'''

import numpy as np

from . import misc


def _random_centroids(k, xs):
    '''在数据xs中找出k个随机点
    
    
    '''
    centroids = np.zeros([k, len(xs[0,:])])

    for i in range(len(xs[0,:])):
        limits = (np.min(xs[:,i]), np.max(xs[:,i]))
        centroids[:,i] = limits[0] + (limits[1] - limits[0])*np.random.rand(1,k)
    return centroids

def k_means(xs, k, distance=misc.euclidean, centroids=None):
    '''k-means clustering.

    Repeatedly assigns every sample to its nearest centroid and moves
    each centroid to the mean of its members, until no assignment
    changes between two passes.

    :param xs: numpy.array([m, n]), data set (m samples, n attributes)
    :param k: int, number of clusters
    :param distance: function, distance measure; defaults to Euclidean
    :param centroids: numpy.array([k, n]), initial centroids; random
        points inside the data's bounding box when omitted
    :returns cs: numpy.array(m, "int"), index of each sample's centroid
        (i.e. its cluster label)
    :returns centroids: numpy.array([k, n]), the converged centroids
    :returns errors: numpy.array(m, "float64"), distance of each sample
        to its assigned centroid
    '''
    # BUG FIX: `centroids == None` compares elementwise when the caller
    # passes an ndarray and then raises "truth value of an array is
    # ambiguous"; identity test is the correct check.
    if centroids is None:
        centroids = _random_centroids(k, xs)
    cs = np.zeros(len(xs), "int")
    errors = np.zeros(len(xs), "float64")

    while True:
        changed = False
        # Assignment step: move every sample to its nearest centroid.
        for i in range(len(cs)):
            ds = distance(xs[i,:], centroids)
            n = np.argmin(ds)  # nearest centroid; cheaper than argsort()[0]
            errors[i] = ds[n]
            if n != cs[i]:
                changed = True
                cs[i] = n

        if not changed:
            # Converged: no sample switched clusters this pass.
            break

        # Update step: recompute each centroid as its members' mean.
        for i in range(len(centroids)):
            members = xs[cs == i]
            # Leave an emptied cluster's centroid where it is instead of
            # taking the mean of zero samples (which would yield NaN).
            if len(members):
                centroids[i,:] = np.mean(members, axis=0)

    return cs, centroids, errors

def bisecting_k_means(xs, k, distance=misc.euclidean):
    '''Bisecting k-means: grow from 1 to k clusters by repeated 2-means splits.

    :param xs: numpy.array([m, n]), data set (m samples, n attributes)
    :param k: int, number of clusters to produce
    :param distance: function, distance measure; defaults to Euclidean
    :returns cs: numpy.array(m, "int"), cluster label of each sample
    :returns centroids: numpy.array([k, n]), the k cluster centroids
    :returns errors: numpy.array(m, "float64"), distance of each sample
        to its cluster centroid
    '''
    centroids = np.zeros([k, len(xs[0,:])], "float64")
    cs = np.zeros(len(xs), "int")   # every sample starts in cluster 0
    errors = np.zeros(len(xs), "float64")

    # Each round adds one cluster: pick a cluster j, 2-means split it.
    for i in range(1, k):
        # b_* hold the best (selected) candidate split of this round.
        (b_sse, b_j, b_cs, b_centroids, b_errors) = (0, -1, None, None, None)

        for j in range(i):
            # Trial split of cluster j into two sub-clusters.
            (sub_cs, sub_centroids, sub_errors) = k_means(xs[cs==j,:], 2, distance)
            # NOTE(review): this score adds cluster j's post-split error to
            # the SAME cluster's pre-split error and the maximum is kept,
            # i.e. it splits the currently worst cluster.  Classic bisecting
            # k-means instead minimizes sum(sub_errors) + sum(errors[cs != j])
            # (total SSE after the split) — confirm which criterion is
            # intended here.
            sse = np.sum(sub_errors) + np.sum(errors[cs==j])
            if sse > b_sse:
                (b_sse, b_j, b_cs, b_centroids, b_errors) = (sse, j, sub_cs, sub_centroids, sub_errors)
        
        # One half of the split keeps slot b_j; the other half gets the
        # brand-new slot i.
        centroids[b_j,:] = b_centroids[0,:]
        centroids[i,:] = b_centroids[1,:]

        # Global row indices of the two halves of the split cluster
        # (b_cs holds the 0/1 sub-labels local to cluster b_j).
        i_b_0 = np.array(range(len(xs)))[cs==b_j][b_cs==0]
        i_b_1 = np.array(range(len(xs)))[cs==b_j][b_cs==1]
        
        cs[i_b_1] = i   # half 0 keeps label b_j; half 1 becomes cluster i

        errors[i_b_0] = b_errors[b_cs == 0]
        errors[i_b_1] = b_errors[b_cs == 1]
        

    return cs, centroids, errors

def _test():
    '''Manual check: run bisecting k-means on a 2-D sample set and plot it.'''
    import data as mdata
    sample = np.array(mdata.get_sample("2d_points_0"), 'float64')

    labels, centers, _errors = bisecting_k_means(sample, 5)
    print(centers)

    import graph
    graph.show_centroids(sample, labels, centers)

    

# Run the manual smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()

