import numpy
from scipy import stats
from matplotlib import pyplot
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

# GMM example with independent (diagonal-covariance) normal distributions
# data1=numpy.random.multivariate_normal(mean=numpy.array([2,3]),cov=numpy.array([[1,0],[0,2]]),size=100)
# data2=numpy.random.multivariate_normal(mean=numpy.array([7,8]),cov=numpy.array([[2,0],[0,3]]),size=100)
# data=numpy.vstack((data1,data2))
# Data visualization
# pyplot.scatter(data1[:,0],data1[:,1])
# pyplot.scatter(data[:,0],data[:,1])
# pyplot.show()
def gmm1(data, lamb, u, sigma2, epsilon, max_iter=1000):
    """EM fit of a 2-component GMM whose components have diagonal covariance.

    Parameters
    ----------
    data : (n, d) ndarray of observations.
    lamb : (2,) initial mixing weights.
    u : (2, d) initial component means.
    sigma2 : (2, d) initial per-dimension variances (diagonal covariances).
    epsilon : convergence tolerance on the norm of each parameter update.
    max_iter : safety cap on EM iterations (must be >= 1); added so a
        non-converging run cannot loop forever. Converging inputs behave
        exactly as before.

    Returns
    -------
    (lamb, u, sigma2, p) : fitted weights, means, variances, and the
        (n, 2) responsibility matrix from the final E-step.
    """
    for _ in range(max_iter):
        # E-step: per-sample responsibilities under the current parameters.
        d0 = stats.multivariate_normal(mean=u[0, :], cov=numpy.diag(sigma2[0, :]))
        d1 = stats.multivariate_normal(mean=u[1, :], cov=numpy.diag(sigma2[1, :]))
        px = lamb[0] * d0.pdf(data) + lamb[1] * d1.pdf(data)
        p0 = lamb[0] * d0.pdf(data) / px
        p1 = lamb[1] * d1.pdf(data) / px
        # M-step: re-estimate weights, means, then variances.
        new_lamb = numpy.array([numpy.sum(p0), numpy.sum(p1)]) / data.shape[0]
        new_u = numpy.vstack((
            numpy.sum(p0.reshape(-1, 1) * data, axis=0) / numpy.sum(p0),
            numpy.sum(p1.reshape(-1, 1) * data, axis=0) / numpy.sum(p1),
        ))
        # Using the freshly updated means here speeds up convergence.
        new_sigma2 = numpy.vstack((
            numpy.sum(p0.reshape(-1, 1) * (data - new_u[0, :]) ** 2, axis=0) / numpy.sum(p0),
            numpy.sum(p1.reshape(-1, 1) * (data - new_u[1, :]) ** 2, axis=0) / numpy.sum(p1),
        ))
        converged = (
            numpy.linalg.norm(new_sigma2 - sigma2) < epsilon
            and numpy.linalg.norm(new_u - u) < epsilon
            and numpy.linalg.norm(new_lamb - lamb) < epsilon
        )
        # Commit the update before testing convergence so the returned
        # parameters are always the newest ones (matches original output).
        lamb, u, sigma2 = new_lamb, new_u, new_sigma2
        if converged:
            break
    p = numpy.vstack((p0, p1)).T
    return lamb, u, sigma2, p
#gmm1(data,numpy.ones(2)/2,numpy.random.rand(2,2)+1,numpy.random.rand(2,2)+1,0.001)
# GMM example with correlated (full-covariance) normal distributions
# Example data for the correlated-normal GMM: two 2-D Gaussian clusters
# with non-diagonal covariance, 100 samples each.
mean_a, cov_a = numpy.array([2, 3]), numpy.array([[1, 0.5], [0.5, 2]])
mean_b, cov_b = numpy.array([4, 5]), numpy.array([[2, -1], [-1, 1]])
data1 = numpy.random.multivariate_normal(mean=mean_a, cov=cov_a, size=100)
data2 = numpy.random.multivariate_normal(mean=mean_b, cov=cov_b, size=100)
data = numpy.vstack((data1, data2))
# Visualize the raw samples:
# pyplot.scatter(data1[:, 0], data1[:, 1])
# pyplot.scatter(data[:, 0], data[:, 1])
# pyplot.show()
def mydot(t):
    """Unpack the 2-tuple ``t`` and return the dot product of its elements."""
    left, right = t
    return numpy.dot(left, right)
def gmm2(data, k, lamb, u, Sigma, epsilon, max_iter=1000):
    """EM fit of a k-component GMM with full covariance matrices.

    Parameters
    ----------
    data : (n, d) ndarray of observations.
    k : number of mixture components.
    lamb : (k,) initial mixing weights.
    u : (k, d) initial component means.
    Sigma : (k, d, d) initial covariance matrices.
    epsilon : convergence tolerance on the log-likelihood improvement.
    max_iter : safety cap on EM iterations (must be >= 1); added so a
        non-converging run cannot loop forever.

    Returns
    -------
    (lamb, u, Sigma, p) : fitted weights, means, covariances, and the
        (k, n) responsibility matrix from the final E-step.
        (The original version discarded these results and returned None.)
    """
    n = data.shape[0]
    for _ in range(max_iter):
        new_Sigma = numpy.empty(Sigma.shape)
        # E-step: component densities (k, n) and total mixture density (n,).
        ds = numpy.vstack(tuple(
            stats.multivariate_normal(mean=u[i, :], cov=Sigma[i]).pdf(data)
            for i in range(k)
        ))
        px = numpy.dot(lamb, ds)
        L = numpy.sum(numpy.log(px))  # log-likelihood before the update
        p = lamb.reshape(-1, 1) * ds / px  # responsibilities, shape (k, n)
        # M-step: weights, means, then covariances (with the new means).
        new_lamb = numpy.sum(p, axis=1) / n
        new_u = p @ data / numpy.sum(p, axis=1).reshape(-1, 1)
        for i in range(k):
            centered = data - new_u[i, :]
            # Weighted sum of outer products, vectorized with einsum
            # (replaces the per-sample Python loop).
            new_Sigma[i] = numpy.einsum('j,ja,jb->ab', p[i], centered, centered) / numpy.sum(p[i])
        new_ds = numpy.vstack(tuple(
            stats.multivariate_normal(mean=new_u[i, :], cov=new_Sigma[i]).pdf(data)
            for i in range(k)
        ))
        new_px = numpy.dot(new_lamb, new_ds)
        new_L = numpy.sum(numpy.log(new_px))
        # The original printed this value with the misleading label "lambda:".
        print("log-likelihood:", new_L)
        print("u:", new_u)
        print("Sigma:", new_Sigma)
        lamb, u, Sigma = new_lamb, new_u, new_Sigma
        if numpy.abs(new_L - L) < epsilon:
            break
    return lamb, u, Sigma, p

# --- Fit the correlated-data GMM, initialized from a k-means clustering ---
kmeans = KMeans(n_clusters=2)
kmeans.fit(data)
labels = kmeans.labels_            # per-sample cluster assignment
centers = kmeans.cluster_centers_  # cluster means, reused as GMM mean init

# Per-cluster sample covariances seed the GMM covariance matrices.
mask = numpy.bool_(labels)
v1 = numpy.cov(data[~mask], rowvar=False)
v2 = numpy.cov(data[mask], rowvar=False)
Sigma = numpy.array([v1, v2])
lamb = numpy.array([0.63, 0.37])   # hand-picked starting mixture weights
gmm2(data, 2, lamb, centers, Sigma, 1e-10)
# pyplot.scatter(data[:, 0], data[:, 1], c=labels)
# pyplot.scatter(centers[:, 0], centers[:, 1], c='red')
# pyplot.show()
# Reference fit with scikit-learn, for comparison against gmm2's output.
gmm = GaussianMixture(n_components=2, covariance_type='full', tol=1e-10)  # two components, full covariance matrices
gmm.fit(data)
print(gmm.means_)
print(gmm.covariances_)