import numpy as np
import math
import matplotlib.pyplot as plt

# Default location of the input dataset; loadData() parses each line as
# comma-separated fields and keeps fields 1 and 2 as floats
# (presumably index,density,sweetness — TODO confirm against the data file).
path = "./resources/data.txt"
# Module-level sample store filled by loadData(); each entry is a
# [float, float] feature pair shared by EM() and showData().
data = []


def loadData(path):
    """Read comma-separated samples from *path* into the global ``data`` list.

    Each line is split on ',' and only fields 1 and 2 are kept, converted
    to floats (field 0 — presumably a row index — is discarded).

    Side effect: appends to the module-level ``data`` list.
    """
    # The 'with' statement closes the file automatically; the original's
    # trailing fp.close() after the block was redundant and is removed.
    with open(path, 'r') as fp:
        # Iterate the file lazily instead of materializing readlines().
        for line in fp:
            fields = line.strip().split(',')
            data.append([float(fields[1]), float(fields[2])])


def showData(clustering, meansVectorAll):
    """Scatter-plot clustered samples and mark every component mean.

    Each cluster gets its own colour; a sample whose position in the global
    ``data`` list falls in [8, 20] is drawn small (s=25), all others large
    (s=100).  Component means are marked with red '+' markers.
    """
    palette = ['blue', 'yellow', 'green']
    sizes = [25, 100]

    plt.title('mixTureOfGaussian')
    plt.xlabel('density')
    plt.ylabel('sweetness')
    for label, members in clustering.items():
        for point in members:
            # Recover the sample's original index to pick its marker size.
            marker_size = sizes[-1]
            for idx, sample in enumerate(data):
                if sample == point:
                    marker_size = sizes[0] if 8 <= idx <= 20 else sizes[1]
                    break
            plt.scatter(point[0], point[1], c=palette[label], s=marker_size)
        mean = np.asarray(meansVectorAll[label]).ravel()
        plt.scatter(mean[0], mean[1], c='red', marker='+', s=100)
    plt.show()


# 定义多元高斯分布概率密度函数
def prob(x, meansVector, covarianceMatrix):
    n = x.shape[0]
    exponential = (-0.5) * (x - meansVector) * covarianceMatrix.I * (x - meansVector).T
    denominator = math.pow(2 * math.pi, n / 2) * math.pow(np.linalg.det(covarianceMatrix), 0.5)
    return math.pow(math.e, exponential) / denominator


# EM algorithm for a 3-component Gaussian mixture.
def EM(max_iter):
    """Fit a 3-component Gaussian mixture to the global ``data`` list via EM.

    Runs *max_iter* iterations of expectation-maximisation, then assigns
    every sample to the component with the highest posterior probability.

    Returns
    -------
    (clustering, means) where ``clustering`` maps component index (0..2) to
    the list of samples assigned to it, and ``means`` is the list of fitted
    mean vectors (1x2 np.matrix rows).
    """
    samples = np.mat(data)
    m = samples.shape[0]
    k = 3

    # Initial parameters: equal mixing weights, means seeded from three
    # fixed samples, identical isotropic covariances.
    mixWeights = [1 / 3] * k
    means = [samples[5], samples[21], samples[26]]
    covariances = [np.mat([[0.1, 0.0], [0.0, 0.1]]) for _ in range(k)]
    # Posterior responsibilities; kept as a plain ndarray (a matrix would
    # add an extra dimension).
    gamma = np.zeros((m, k))

    for _ in range(max_iter):
        # E-step: posterior probability of each component for every sample.
        for i in range(m):
            total = 0
            for j in range(k):
                gamma[i][j] = mixWeights[j] * prob(samples[i], means[j], covariances[j])
                total += gamma[i][j]
            for j in range(k):
                gamma[i][j] /= total

        # Column sums of the responsibilities, reused by every M-step update.
        columnSums = np.sum(gamma, axis=0)

        # M-step: re-estimate means, covariances and mixing weights.
        for j in range(k):
            weightedSum = 0
            for i in range(m):
                weightedSum += gamma[i][j] * samples[i]
            means[j] = weightedSum / columnSums[j]

            scatter = 0
            for i in range(m):
                deviation = samples[i] - means[j]
                scatter += gamma[i][j] * deviation.T * deviation
            covariances[j] = scatter / columnSums[j]

            mixWeights[j] = columnSums[j] / m

    # Hard assignment: each sample goes to its maximum-posterior component.
    clustering = {label: [] for label in range(k)}
    for i in range(m):
        clustering[gamma[i].argmax()].append(data[i])

    return clustering, means


if __name__ == '__main__':
    # Load the samples, run EM for 50 iterations, then print and plot
    # the resulting clustering.
    loadData(path)
    groups, fittedMeans = EM(50)
    print(groups)
    showData(groups, fittedMeans)

