
# coding: utf-8

# In[1]:

#!usr/bin/python

print("你好，欢迎学习非监督学习算法——k-means聚类算法")


# In[2]:

from numpy import*
def loadDataSet(fileName):
    """Parse a tab-delimited text file into a list of float rows.

    Args:
        fileName: path to a file whose lines are tab-separated numbers.

    Returns:
        A list of lists of floats, one inner list per line.
    """
    dataMat = []
    # 'with' guarantees the handle is closed even if parsing raises;
    # the original opened the file and never closed it.
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            dataMat.append([float(tok) for tok in curLine])
    return dataMat
def distEclud(vecA, vecB):
    """Return the Euclidean distance between two row vectors."""
    diff = (vecA - vecB).A  # .A: elementwise ops on the plain ndarray, not matrix ops
    return sqrt((diff * diff).sum())
def randCent(dataSet, k):
    """Build k random initial centroids, each feature drawn uniformly
    within that feature's [min, max) range over dataSet."""
    numFeats = shape(dataSet)[1]
    centroids = mat(zeros((k, numFeats)))
    for col in range(numFeats):
        colMin = min(dataSet[:, col])
        span = float(max(dataSet[:, col]) - colMin)
        # random.rand(k, 1) is uniform in [0, 1), scaled into the column range
        centroids[:, col] = colMin + span * random.rand(k, 1)
    return centroids
# Smoke-test the helpers above on the sample data set.
dataMat = mat(loadDataSet('testSet.txt'))
for stat in (min(dataMat[:, 0]), max(dataMat[:, 0])):
    print(stat)
print(randCent(dataMat, 2))
print(distEclud(dataMat[0], dataMat[1]))


# In[3]:

# Full k-means: create k centroids, assign every point to its nearest
# centroid, recompute each centroid as its cluster mean, and repeat until
# no assignment changes.
def kMeans(dataSet, k, distMeans=distEclud, createCent=randCent):
    """Cluster dataSet into k clusters with Lloyd's algorithm.

    Args:
        dataSet: m x n data matrix (numpy matrix).
        k: number of clusters.
        distMeans: distance function between two row vectors.
        createCent: function producing the initial k x n centroid matrix.

    Returns:
        (centroids, clusterAssment): the final k x n centroid matrix, and an
        m x 2 matrix whose column 0 is the assigned cluster index and
        column 1 the squared distance to that centroid (the point's error).
    """
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))  # col 0: cluster index, col 1: squared error
    centroids = createCent(dataSet, k)
    clusterChanged = True  # iterate until assignments are stable
    while clusterChanged:
        clusterChanged = False
        for i in range(m):
            minDist = inf
            minIndex = -1
            for j in range(k):
                distJI = distMeans(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = j
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist ** 2
        print(centroids)
        for cent in range(k):  # recompute every centroid
            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]
            # Guard against an empty cluster: mean() of an empty selection is
            # NaN and would corrupt the centroid; keep the old centroid instead.
            if len(ptsInClust) > 0:
                centroids[cent, :] = mean(ptsInClust, axis=0)
    return centroids, clusterAssment


# In[4]:

datMat = mat(loadDataSet('testSet.txt'))
myCenteroids, clustAssing = kMeans(datMat, 4)
# NOTE: expect 4 centroids; on this dataset the loop converges in ~3 iterations.


# In[17]:

# Next we discuss using the error column of the assignment matrix to
# evaluate clustering quality.
# k-means converges but may cluster poorly because it can reach a local
# minimum of the SSE rather than the global minimum.
# Cluster-merging strategies: merge the two closest centroids (compute all
# pairwise centroid distances), or merge the pair of clusters whose merge
# increases the total SSE the least (try every possible pair).
# Bisecting k-means algorithm
def biKmeans(dataSet, k, distMeas=distEclud):
    """Bisecting k-means: repeatedly split the cluster whose 2-way split
    yields the lowest total SSE, until k clusters exist.

    Args:
        dataSet: m x n data matrix (numpy matrix).
        k: desired number of clusters.
        distMeas: distance function between two row vectors.

    Returns:
        (centList, clusterAssment): a list of k centroids (each a plain
        list of floats, so mat(centList) works), and an m x 2 matrix of
        (cluster index, squared error) per point.
    """
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m, 2)))
    # Start from a single cluster whose centroid is the mean of all points.
    centroid0 = mean(dataSet, axis=0).tolist()[0]
    centList = [centroid0]   # all current centroids, as plain python lists
    for j in range(m):
        clusterAssment[j, 1] = distMeas(mat(centroid0), dataSet[j, :]) ** 2  # initial errors
    while (len(centList) < k):
        lowestSSE = inf
        # Try splitting every existing cluster; keep the split that gives
        # the lowest combined SSE (split part + untouched part).
        for i in range(len(centList)):
            ptsInCurrCluster = dataSet[nonzero(clusterAssment[:, 0].A == i)[0], :]
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
            sseSplit = sum(splitClustAss[:, 1])  # SSE of the candidate split
            sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:, 0].A != i)[0], 1])
            print(sseSplit, sseNotSplit)
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat
                bestClustAss = splitClustAss.copy()
                lowestSSE = sseSplit + sseNotSplit
        # Commit the best split: sub-cluster 1 gets a brand-new index,
        # sub-cluster 0 inherits the index of the cluster that was split.
        bestClustAss[nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList)
        bestClustAss[nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit
        print('the bestClustToSplit is:', bestCentToSplit)
        print('the len of bestClustAss is:', len(bestClustAss))
        # BUG FIX: store plain lists, not 1 x n matrix rows. The original
        # assigned bestNewCents[0,:] / appended bestNewCents[1,:] directly,
        # leaving centList a mix of lists and matrices and breaking
        # mat(centList) for any downstream consumer (e.g. plotting).
        centList[bestCentToSplit] = bestNewCents[0, :].tolist()[0]
        centList.append(bestNewCents[1, :].tolist()[0])
        clusterAssment[nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss
    return centList, clusterAssment
# Demo: bisecting k-means with 3 clusters on the second sample data set.
datMat3 = mat(loadDataSet('testSet2.txt'))
centList, myNewAssments = biKmeans(datMat3, 3)
centList  # notebook cell output: display the final centroid list


# In[ ]:

# Example: clustering points on a map

