import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal

# Gaussian density of the k-th mixture component, evaluated at X.
def phi(X, mu_k, cov_k):
    """Return the pdf of N(mu_k, cov_k) at X.

    X may be a single sample (returns a scalar) or a batch of samples
    (returns a 1-D array, one density per row of X).
    """
    return multivariate_normal(mean=mu_k, cov=cov_k).pdf(X)

# E-step: compute each component's responsibility for every sample.
def getExpectation(X, mu, cov, pi):
    """Return the (m, K) responsibility matrix w.

    w[i, k] is the posterior probability that sample i was generated by
    component k: pi[k] * N(X[i] | mu[k], cov[k]), normalized over k.
    Returned as an np.mat, rows summing to 1.
    """
    K = pi.shape[0]
    # Column k holds the density of every sample under component k.
    dens = np.mat(np.column_stack(
        [multivariate_normal(mean=mu[k], cov=cov[k]).pdf(X) for k in range(K)]
    ))
    # Weight by the mixing coefficients, then normalize each row.
    weighted = np.multiply(dens, pi)
    return weighted / np.sum(weighted, axis=1)

# M-step: re-estimate the model parameters from the responsibilities.
def maximize(X, w):
    """Return updated (mu, cov, pi).

    X is the (m, n) sample matrix (np.matrix), w the (m, K) responsibility
    matrix from the E-step. mu is (K, n), cov is (K, n, n), pi is (K,).
    """
    m, n = X.shape
    K = w.shape[1]

    mu = np.zeros((K, n))
    pi = np.zeros(K)
    cov = []

    for k in range(K):
        resp_k = w[:, k]            # (m, 1) responsibilities of component k
        total = np.sum(resp_k)      # effective number of samples in component k
        # Responsibility-weighted mean of every feature.
        mu[k] = np.asarray(np.multiply(resp_k, X).sum(axis=0)).ravel() / total
        # Responsibility-weighted covariance around the new mean.
        diff = X - mu[k]
        cov.append(np.multiply(resp_k, diff).T * diff / total)
        # Mixing coefficient: component k's share of the total responsibility.
        pi[k] = total / m
    return mu, np.array(cov), pi

# Preprocessing: min-max scale every column of X into [0, 1], in place.
def scale_data(X):
    """Scale each column of X to [0, 1] using (x - min) / (max - min).

    Mutates X in place and also returns it. A constant column is mapped
    to all zeros instead of producing NaN from a zero-width range
    (the original code divided by zero in that case).
    """
    for j in range(X.shape[1]):
        col_max = X[:, j].max()
        col_min = X[:, j].min()
        span = col_max - col_min
        if span == 0:
            # Constant feature: no spread to normalize; define it as 0.
            X[:, j] = 0
        else:
            X[:, j] = (X[:, j] - col_min) / span
    print("Data scaled.")
    return X

# Initialize model parameters; shape is (num samples, num features),
# K is the number of mixture components.
def init_params(shape, K):
    """Return initial (mu, cov, pi) for a K-component GMM.

    mu: (K, n) uniform random means in [0, 1) — matches the scaled data.
    cov: (K, n, n) identity covariances.
    pi: (K,) uniform mixing coefficients.
    """
    m, n = shape
    mu = np.random.rand(K, n)
    cov = np.array([np.eye(n) for _ in range(K)])
    pi = np.full(K, 1.0 / K)
    print("Parameters initialized.")
    print("mu:", mu, "cov:", cov, "pi:", pi, sep="\n")
    return mu, cov, pi

def computeLikehood(X, mu, cov, pi):
    """Return the log-likelihood of X under the current mixture parameters.

    Computes sum_i log(sum_k pi[k] * N(X[i] | mu[k], cov[k])). The result
    is a 1x1 np.matrix (np.sum over a matrix axis), not a bare scalar.
    """
    K = pi.shape[0]
    # Column k holds the density of every sample under component k.
    dens = np.mat(np.column_stack(
        [multivariate_normal(mean=mu[k], cov=cov[k]).pdf(X) for k in range(K)]
    ))
    # Mix the per-component densities, take logs, sum over the samples.
    return np.sum(np.log(np.sum(np.multiply(pi, dens), axis=1)), axis=0)

# Gaussian mixture model via EM: X is the sample matrix, K the number of
# components, times the number of iterations.
def GMM_EM(X, K, times):
    """Fit a K-component Gaussian mixture to X with `times` EM iterations.

    Scales X into [0, 1] first (in place), then alternates E and M steps.
    Returns (mu, cov, pi, like_his), where like_his is the per-iteration
    log-likelihood history.
    """
    X = scale_data(X)
    mu, cov, pi = init_params(X.shape, K)
    like_his = []
    for i in range(times):
        # One EM round: responsibilities, parameter update, likelihood.
        resp = getExpectation(X, mu, cov, pi)
        mu, cov, pi = maximize(X, resp)
        like_val = computeLikehood(X, mu, cov, pi)
        print("step:", i, "L:", like_val)
        like_his.append(like_val)
    print("{sep} Result {sep}".format(sep="-" * 20))
    print("mu:", mu, "cov:", cov, "pi:", pi, sep="\n")
    return mu, cov, pi, like_his

if __name__ == '__main__':
    # Load the dataset; columns 1-2 hold the two numeric features.
    import pandas as pd
    frame = pd.read_csv("HeightWeight.csv", delimiter=',')
    samples = frame.iloc[:, 1:3].values
    matX = np.matrix(samples, copy=True)

    # Number of mixture components, i.e. number of clusters.
    K = 2

    # Fit the GMM and plot the log-likelihood history.
    mu, cov, pi, like_his = GMM_EM(matX, K, 100)
    plt.plot(np.squeeze(like_his))
    plt.show()

    # Responsibility matrix under the fitted parameters.
    w = getExpectation(matX, mu, cov, pi)
    # print("w:",w)

    # Label each sample with the component of highest responsibility.
    category = w.argmax(axis=1).flatten().tolist()[0]

    # Plot the clustering result.
    plt.scatter(samples[:, 0], samples[:, 1], c=category)
    plt.title("GMM Clustering By EM Algorithm")
    plt.show()
