import pandas as pd
from kmodes.kmodes import KModes
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

# https://www.geeksforgeeks.org/k-mode-clustering-in-python/#

def do_kmodes(data, k, modes, max_iter=10):
    """Run a simple k-modes clustering on categorical data.

    Records are assigned to the cluster whose mode has the smallest Hamming
    distance (number of mismatched attributes); each cluster's mode is then
    recomputed per attribute. Iterates until assignments stop changing or
    ``max_iter`` iterations have run.

    Parameters:
        data: 2-D array-like of shape (n_records, n_attributes).
        k: number of clusters (must match ``len(modes)``).
        modes: sequence of k initial cluster modes; updated in place.
        max_iter: maximum number of assignment/update iterations (default 10,
            matching the original hard-coded limit).

    Returns:
        (clusters, modes): the final cluster index per record and the final
        cluster modes.
    """
    n_records = data.shape[0]
    # clusters[j] is the index of the cluster record j is assigned to.
    clusters = np.zeros(n_records, dtype=int)
    clusters_prev = np.zeros(n_records, dtype=int)

    for _ in range(max_iter):
        # Assignment step: element-wise != on two 1-D rows yields booleans,
        # whose sum is the Hamming distance to each mode.
        for j, record in enumerate(data):
            distances = np.array([np.sum(record != mode) for mode in modes])
            clusters[j] = np.argmin(distances)

        # Update step: per-attribute mode over each cluster's members.
        for j in range(k):
            members = data[clusters == j]
            if len(members) == 0:
                # Empty cluster: stats.mode would fail on a zero-length
                # array, so keep the previous mode instead.
                continue
            # reshape(-1) normalizes the result across scipy versions, where
            # ModeResult.mode is (n_attributes,) or (1, n_attributes)
            # depending on the keepdims default.
            modes[j] = np.asarray(stats.mode(members, axis=0).mode).reshape(-1)

        # Converged: no record changed its cluster since last iteration.
        if (clusters == clusters_prev).all():
            break

        # BUG FIX: must copy — plain assignment aliased the same array, so
        # the convergence check compared clusters with itself and always
        # stopped after at most two iterations.
        clusters_prev = clusters.copy()
    return clusters, modes

def t1_kmode():
    """Cluster a tiny string-valued data set with the hand-rolled k-modes.

    NOTE(review): scipy.stats.mode raises on string data in recent scipy
    versions, so this example may fail at the mode-update step.
    """
    # Five records with three categorical attributes each.
    data = np.array(
        [
            ['A', 'B', 'C'],
            ['B', 'C', 'A'],
            ['C', 'A', 'B'],
            ['A', 'C', 'B'],
            ['A', 'A', 'B'],
        ]
    )

    # Two clusters with hand-picked initial modes.
    k = 2
    modes = [
        ['A', 'B', 'C'],
        ['C', 'B', 'A'],
    ]

    clusters, M = do_kmodes(data, k, modes)
    print("The cluster assignments for each data object: ", clusters)
    print("Modes for each cluster: ", M)

def t2_int_kmode(k, dim):
    """Run the hand-rolled k-modes on 100 random integer-coded records.

    Parameters:
        k: number of clusters.
        dim: number of attributes per record.
    """
    # Random integer-coded categorical data and initial modes in [0, 255).
    data = np.random.randint(0, 255, (100, dim))
    initial_modes = np.random.randint(0, 255, (k, dim))

    _, final_modes = do_kmodes(data, k, initial_modes)
    print("Modes for each cluster: ", final_modes)
    # NOTE: 12-dimensional data does not lend itself to plotting.
    # 12 维数据似乎不太好画图

# Use an elbow curve to find the best K.
# As we can see from the graph there is an elbow-like shape at 2 and 3, so we can consider either 2 or 3 clusters.
# This example is not very representative: the data set is small and so is K.
def t3_elbow_curve():
    """Plot an elbow curve (clustering cost vs. K) using the kmodes library."""
    data = np.array(
        [
            ['A', 'B', 'C'],
            ['B', 'C', 'A'],
            ['C', 'A', 'B'],
            ['A', 'C', 'B'],
            ['A', 'A', 'B'],
        ]
    )

    # Fit a model for each candidate K and record its final cost.
    K = range(1, 5)
    cost = []
    for n_clusters in K:
        model = KModes(n_clusters=n_clusters, init="random", n_init=5, verbose=1)
        model.fit_predict(data)
        cost.append(model.cost_)

    plt.plot(K, cost, 'x-')
    plt.xlabel('No. of clusters')
    plt.ylabel('Cost')
    plt.title('Elbow Curve')
    plt.show()

def t4_KModes_ex():
    """Demo of the kmodes library's KModes on random categorical data."""
    # 100 records, 10 attributes, each value drawn from {0, ..., 19}.
    data = np.random.choice(20, (100, 10))

    # init selects the initial-centroid strategy ('Huang' uses frequent values).
    model = KModes(n_clusters=4, init='Huang', n_init=5, verbose=1)
    model.fit_predict(data)

    print(model.cluster_centroids_)


#t4_KModes_ex()
#t3_elbow_curve()
t2_int_kmode(5, 12)
#t1_kmode() # the stats.mode call raises an error (string data)
