import numpy as np
from sklearn.cluster import KMeans as sk_kmean


def load_data(file):
    """
    Load a tab-separated numeric data file.

    Each non-empty line is expected to hold tab-separated floats.

    :param file: path to the data file
    :return: list of rows, each a list of floats
    """
    with open(file, 'r') as f:
        file_data = f.readlines()

    # str.strip() handles both a trailing '\n' and a final line without one
    # (the original `line[:-1]` chopped a digit off a last line that lacked
    # a newline); blank lines are skipped instead of crashing on float('').
    return [list(map(float, line.strip().split('\t')))
            for line in file_data if line.strip()]


class KMean(object):
    """Plain k-means and bisecting k-means clustering over dense numeric data."""

    @staticmethod
    def distance(vec_a, vec_b):
        """
        Distance between two vectors (currently Euclidean).

        Alternative metrics are kept as inner helpers for reference.

        :param vec_a: first vector (array-like of numbers)
        :param vec_b: second vector (array-like of numbers)
        :return: Euclidean distance between vec_a and vec_b (float)
        """
        def euclidean(a, b):
            """Euclidean distance: sqrt(sum((a - b)^2))."""
            return np.sqrt(np.power(a - b, 2).sum())

        def manhattan(a, b):
            """Manhattan (city-block) distance: sum(|a - b|)."""
            return sum(abs(a - b))

        def chebyshev(a, b):
            """Chebyshev distance: max(|a - b|)."""
            return max(abs(a - b))

        def minkowski(a, b, p=1):
            """
            Minkowski distance: (sum(|a - b|^p))^(1/p).

            p = 1 -> Manhattan, p = 2 -> Euclidean, p -> inf -> Chebyshev.
            Caveats: treats every component as having the same scale/unit
            and ignores the components' individual distributions.
            """
            return np.power(sum(np.power(abs(a - b), p)), 1 / p)

        def standard_euclidean(a, b):
            """
            Standardized Euclidean distance: sqrt(sum(((a - b) / s)^2)),
            with s the per-component standard deviation, to compensate for
            differently-distributed components.
            """
            s = [np.std(pair) for pair in zip(a, b)]
            return np.sqrt(np.power((a - b) / np.array(s), 2).sum())

        def mahalanobis(a, b):
            """Mahalanobis distance: sqrt((a-b).T * inv(cov) * (a-b))."""
            x = np.vstack((a, b))
            cov_inv = np.linalg.inv(np.cov(x.T))
            mid = np.dot((a - b).T, cov_inv)
            return np.sqrt(np.dot(mid, a - b))

        def cosine(a, b):
            """Cosine similarity: cos(theta) = (a . b) / (|a| * |b|)."""
            numerator = np.dot(a, b)
            denominator = (np.sqrt(np.power(a, 2).sum())
                           * np.sqrt(np.power(b, 2).sum()))
            return numerator / denominator

        def hamming(a, b):
            """
            Hamming distance: number of positions at which two equal-length
            sequences differ, e.g. "1111" vs "1001" -> 2.
            (Originally an unimplemented stub returning None.)
            """
            return int((np.asarray(a) != np.asarray(b)).sum())

        if not isinstance(vec_a, np.ndarray):
            vec_a = np.array(vec_a)
        if not isinstance(vec_b, np.ndarray):
            vec_b = np.array(vec_b)

        # Euclidean distance is the metric actually used by the clustering.
        return euclidean(vec_a, vec_b)

    @staticmethod
    def get_random_center(data, k):
        """
        Draw k random initial centroids; each component is sampled uniformly
        within that component's [min, max] range over the data.

        :param data: iterable of equal-length numeric vectors
        :param k: number of centroids
        :return: (k, n_features) ndarray of centroids
        """
        center = []
        for feature in np.array(data).T:
            lo = min(feature)
            spread = max(feature) - lo
            center.append(lo + spread * np.random.rand(k))
        return np.array(center).T

    def k_mean(self, data, k):
        """
        Standard k-means clustering.

        :param data: iterable of equal-length numeric vectors
        :param k: number of clusters
        :return: (center, cluster_info) where center is a (k, n_features)
                 ndarray and cluster_info is a (n_samples, 2) ndarray of
                 [assigned cluster index, squared distance to its centroid]
        """
        data_arr = np.array(data)
        cluster_info = np.zeros((len(data_arr), 2))
        center = self.get_random_center(data_arr, k)
        cluster_changed = True
        while cluster_changed:
            cluster_changed = False
            # Assignment step: attach each point to its nearest centroid.
            for num, info in enumerate(data_arr):
                min_dist = np.inf
                min_index = -1
                for j in range(k):
                    dist_j = self.distance(center[j], info)
                    if dist_j < min_dist:
                        min_dist = dist_j
                        min_index = j
                if cluster_info[num][0] != min_index:
                    cluster_changed = True
                cluster_info[num] = min_index, np.power(min_dist, 2)
            # Update step: recompute each centroid as the mean of its points.
            for cent in range(k):
                members = np.nonzero(cluster_info[:, 0] == cent)[0]
                if len(members):
                    center[cent] = data_arr[members].mean(axis=0)
                else:
                    # Bug fix: the mean of an empty slice is NaN and poisons
                    # every later distance comparison. Reseed an empty
                    # cluster at the point farthest from its own centroid;
                    # only loop again if that actually moved the centroid
                    # (guards against an infinite reseed loop).
                    reseed = data_arr[int(np.argmax(cluster_info[:, 1]))]
                    if not np.array_equal(center[cent], reseed):
                        center[cent] = reseed
                        cluster_changed = True
        return center, cluster_info

    def bin_k_mean(self, data, k):
        """
        Bisecting k-means clustering (SSE = sum of squared distances).

        Mitigates plain k-means converging to a poor local minimum:
        1. start with one cluster holding all points (centroid = global mean);
        2. repeatedly try a 2-means split of every existing cluster and keep
           the split that yields the lowest overall SSE, until k clusters
           exist.

        :param data: iterable of equal-length numeric vectors
        :param k: target number of clusters
        :return: (center_list, cluster_info), same layout as k_mean except
                 center_list is a Python list of centroids
        :raises NameError: when the chosen cluster cannot be split further
        """
        data_arr = np.array(data)
        cluster_info = np.zeros((len(data_arr), 2))
        # Initial single cluster: centroid is the mean of all points.
        center_init = data_arr.mean(axis=0)
        center_list = [center_init]
        for num, info in enumerate(data_arr):
            cluster_info[num][1] = np.power(self.distance(center_init, info), 2)

        while len(center_list) < k:
            lowest_sse = np.inf
            best_center_split = 0
            best_new_center = []
            best_clust_sse = np.array([[]])

            # Try bisecting every existing cluster; keep the candidate that
            # minimises the total SSE (split part + untouched rest).
            for center_num, _ in enumerate(center_list):
                member_index = np.nonzero(cluster_info[:, 0] == center_num)[0]
                if len(member_index) == 0:
                    # Degenerate empty cluster: nothing to split.
                    continue
                members = data_arr[member_index]
                split_center, split_cluster_info = self.k_mean(data=members, k=2)
                split_sse = split_cluster_info[:, 1].sum()
                # SSE of every point outside the candidate cluster.
                rest_index = np.nonzero(cluster_info[:, 0] != center_num)[0]
                rest_sse = cluster_info[rest_index][:, 1].sum()

                if split_sse + rest_sse < lowest_sse:
                    best_center_split = center_num
                    best_new_center = split_center
                    best_clust_sse = split_cluster_info.copy()
                    lowest_sse = split_sse + rest_sse

            if len(best_clust_sse) == 1:
                raise NameError('无法再切分簇')

            # Relabel the split result: local label 1 becomes a brand-new
            # cluster id, local label 0 keeps the id of the split cluster.
            # Bug fixes vs the original:
            #  * `arr[idx][:, 0] = v` chained fancy indexing wrote into a
            #    COPY, so the relabelling was silently lost;
            #  * the new id was computed from len(center_list) AFTER the
            #    append, off by one.
            new_label = len(center_list)
            best_clust_sse[best_clust_sse[:, 0] == 1, 0] = new_label
            best_clust_sse[best_clust_sse[:, 0] == 0, 0] = best_center_split

            # Update the centroid list: replace the split centroid, append
            # the new one (its index equals new_label).
            center_list[best_center_split] = best_new_center[0]
            center_list.append(best_new_center[1])

            # Write the relabelled rows back over the cluster that was split
            # (np.nonzero preserves row order, matching the k_mean call).
            replace_index = np.nonzero(cluster_info[:, 0] == best_center_split)[0]
            cluster_info[replace_index] = best_clust_sse

        return center_list, cluster_info


def run_main(data_set, k):
    """
    Cluster data_set with three algorithms and print each set of centroids:
    our k-means, our bisecting k-means, and scikit-learn's KMeans.

    :param data_set: list of numeric feature vectors
    :param k: number of clusters
    :return:
    """
    model = KMean()

    centers, _ = model.k_mean(data=data_set, k=k)
    print('kmean: {}'.format(centers))

    bin_centers, _ = model.bin_k_mean(data=data_set, k=k)
    print('bin_kmean: {}'.format(bin_centers))

    # Reference result from scikit-learn for comparison.
    sk_model = sk_kmean(n_clusters=k)
    sk_model.fit(data_set)
    print('sklearn: {}'.format(sk_model.cluster_centers_))


def run_simple():
    """Load the demo data set and run all three clustering variants with k=3."""
    data_set = load_data('./data/B/chp10/testSet2.txt')
    run_main(data_set, k=3)


# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    run_simple()
