import numpy as np
import pandas as pd
class KMeans:
    """Weighted K-Means clustering.

    The distance metric combines a spatial Euclidean distance (columns 11 and
    12, assumed to be X/Y coordinates — TODO confirm against the input CSV)
    with a weighted Euclidean distance over attribute columns 3-10
    (GDP, aspect, railway, road, school, DEM, slope, population).
    """

    def __init__(self, data, num_clusters):
        # data: 2-D ndarray of shape (num_examples, num_features), with at
        # least 13 columns (indices 3-12 are read by the distance function).
        self.data = data
        self.num_clusters = num_clusters

    def train(self, max_iterations):
        """Run K-Means for at most ``max_iterations`` iterations.

        Stops early once the centroids no longer move between iterations.
        As a side effect, writes the partition and centroid results to the
        hard-coded CSV paths below (relative to the working directory).

        Returns:
            (centroids, closest_centroids_ids) — the final centroid matrix
            and the (num_examples, 1) array of cluster indices per sample.
        """
        # 1. Randomly choose K initial centroids from the data points.
        centroids = KMeans.centroids_init(self.data, self.num_clusters)
        num_examples = self.data.shape[0]
        # Pre-fill assignments with zeros (np.empty would leave uninitialized
        # garbage that gets saved/returned when max_iterations == 0).
        closest_centroids_ids = np.zeros((num_examples, 1))
        flag = 0  # number of iterations actually performed
        for _ in range(max_iterations):
            previous_centroids = centroids
            # 2. Assign every sample to its nearest centroid.
            closest_centroids_ids = KMeans.centroids_find_closest(self.data, centroids)
            # 3. Recompute each centroid as the mean of its assigned samples.
            centroids = KMeans.centroids_compute(self.data, closest_centroids_ids, self.num_clusters)
            flag += 1
            # Converged: no centroid moved since the previous iteration.
            if (centroids == previous_centroids).all():
                break
        print('本次迭代次数为：', flag)
        # Persist the partition result and centroids.
        np.savetxt('../data/csv/Partition_wp=0.8.csv', np.array(closest_centroids_ids), header='Partition', fmt="%.2f", delimiter='\n')
        np.savetxt('../data/csv/Centroids_wp=0.8.csv', np.array(centroids), fmt="%.6f", delimiter='\n')
        # Join the per-sample partition ids back onto the source table.
        df = pd.read_csv("../data/csv/Partition_wp=0.8.csv")
        df2 = pd.read_csv('../data/testData/double-constrain.csv')
        df2["Partition"] = df['# Partition']
        df2.to_csv("../data/csv/D_Partition_wp=0.8.csv", mode='a', index=False)
        return centroids, closest_centroids_ids

    @staticmethod
    def centroids_init(data, num_clusters):
        """Pick ``num_clusters`` distinct random data points as initial centroids."""
        num_examples = data.shape[0]
        random_ids = np.random.permutation(num_examples)
        # ':' keeps every feature column of the selected rows.
        centroids = data[random_ids[:num_clusters], :]
        return centroids

    # (column index, weight) pairs for the attribute part of the distance.
    _ATTRIBUTE_WEIGHTS = (
        (3, 0.16),   # GDP
        (10, 0.16),  # population
        (4, 0.02),   # aspect
        (8, 0.04),   # DEM
        (9, 0.14),   # slope
        (5, 0.16),   # railway
        (6, 0.16),   # road
        (7, 0.16),   # school
    )

    @staticmethod
    def centroids_find_closest(data, centroids, Wp=0.8):
        """Return a (num_examples, 1) array of nearest-centroid indices.

        Distance = Wp * spatial Euclidean distance (columns 11, 12)
                 + (1 - Wp) * weighted Euclidean distance over columns 3-10.

        Args:
            data: (num_examples, num_features) sample matrix (>= 13 columns).
            centroids: (num_centroids, num_features) centroid matrix.
            Wp: weight of the spatial term; defaults to the original
                hard-coded 0.8 for backward compatibility.
        """
        num_examples = data.shape[0]
        num_centroids = centroids.shape[0]
        Wa = 1 - Wp
        closest_centroids_ids = np.zeros((num_examples, 1))
        for example_index in range(num_examples):
            distance = np.zeros((num_centroids, 1))
            for centroid_index in range(num_centroids):
                # Spatial term: Euclidean distance on the X/Y columns.
                dx = (data[example_index, 11] - centroids[centroid_index, 11]) ** 2
                dy = (data[example_index, 12] - centroids[centroid_index, 12]) ** 2
                spatial = Wp * ((dx + dy) ** 0.5)
                # Attribute term: weighted squared differences, then sqrt.
                attr_sq = sum(
                    w * ((data[example_index, col] - centroids[centroid_index, col]) ** 2)
                    for col, w in KMeans._ATTRIBUTE_WEIGHTS
                )
                attribute = Wa * (attr_sq ** 0.5)
                distance[centroid_index] = spatial + attribute
            # np.argmin gives the index of the smallest distance.
            closest_centroids_ids[example_index] = np.argmin(distance)
        return closest_centroids_ids

    @staticmethod
    def centroids_compute(data, closest_centroids_ids, num_clusters):
        """Return the (num_clusters, num_features) mean of each cluster.

        An empty cluster is re-seeded with a random data point instead of
        taking np.mean of an empty slice, which would yield NaN and poison
        every subsequent iteration.
        """
        num_features = data.shape[1]
        centroids = np.zeros((num_clusters, num_features))
        for centroid_id in range(num_clusters):
            # Boolean mask of the samples assigned to this centroid;
            # flatten() collapses the (n, 1) id column to 1-D for row indexing.
            mask = (closest_centroids_ids == centroid_id).flatten()
            if mask.any():
                centroids[centroid_id] = np.mean(data[mask, :], axis=0)
            else:
                # Empty cluster: pick a random sample to keep centroids finite.
                centroids[centroid_id] = data[np.random.randint(data.shape[0])]
        return centroids