# -*- coding: utf-8 -*- 
"""
  Copyright (c) 2022, colburn
  All rights reserved.
  email:bjay@qq.com
  EM算法示例
"""#"
import argparse
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import math

import numpy as np
import scipy.stats as stats
import torch
import torch.nn as nn

def parseArgument():
  """Build the command-line argument parser for the EM demo.

  Returns:
    argparse.ArgumentParser: parser exposing data, category, gpu,
    ncluster, dimension and the multiprocessing flag.
  """
  ap = argparse.ArgumentParser(description='PyTorch ImageNet Training')
  # Positional selector for the initial probability list.
  ap.add_argument('data', metavar='PROBLIST', type=int, default=2,
                  help='initial Probability list, like 2')
  # 0 selects the binomial model, 1 the normal model.
  ap.add_argument('-c', '--category', type=int, default=1, metavar='C',
                  help='EM category(default=1)(0:Binom, 1:Norm)')
  ap.add_argument('--gpu', type=int, default=None,
                  help='GPU id to use.')
  ap.add_argument('-n', '--ncluster', type=int, default=2,
                  help='the number of clustering, default =2')
  ap.add_argument('-d', '--dimension', type=int, default=2,
                  help='the dimension of data, default =2')
  ap.add_argument('--multiprocessing-distributed', action='store_true',
                  help='Use multi-processing distributed training to launch '
                       'N processes per node, which has N GPUs. This is the '
                       'fastest way to use PyTorch for either single node or '
                       'multi node data parallel training')
  return ap

class myEm(nn.Module):
  def __init__(self, args=None, le =2, Ndim =2):
    if args !=None:
      self.categ =args.category
    self.beta =0.9
    if torch.cuda.is_available():
      self.gmu =torch.ones([le,Ndim,1]).cuda()
      self.gsigma =(torch.ones([le, Ndim, Ndim]) *torch.eye(Ndim)).cuda()
    else:
      self.gmu =torch.ones([le,Ndim,1])
      self.gsigma =(torch.ones([le, Ndim, Ndim]) *torch.eye(Ndim))
      
  
  def genBinomData(self, le):
    size =10000
    probs =[]
    persize =size //le
    perprob =100/(le+1)
    prob =100/(le+1)
    a =np.random.random(persize)[:,np.newaxis]
    a[a <(prob/100)] =0
    a[a >=(prob/100)] =1
    probs.append(1. -prob/100)
    
    for i in range(le -2):
      prob +=perprob
      b =np.random.random(persize)[:,np.newaxis]
      b[b <(prob/100)]  =0
      b[b >=(prob/100)] =1
      probs.append(1. -prob/100)
      a =np.vstack((a,b))
  
    prob +=perprob
    b =np.random.random(size -len(a))[:,np.newaxis]
    b[b <(prob/100)]  =0
    b[b >=(prob/100)] =1
    probs.append(1. -prob/100)
    a =np.vstack((a,b))
    
    a =a.reshape(len(a)//10,10)
    np.random.shuffle(a)
    print(probs)
    return a, probs
  
    a =[[1,0,0,0,1,1,0,1,0,1],
        [1,1,1,1,0,1,1,1,1,1],
        [1,0,1,1,1,1,1,0,1,1],
        [1,0,1,0,0,0,1,1,0,0],
        [0,1,1,1,0,1,1,1,0,1]]
    a =np.asarray(a)
    return a
  
  def genNormData(self, le):
    size =10000
    #生成le种正态分布占比
    prt =np.zeros([le,3], dtype=float)
    s =np.random.random(le)[:,np.newaxis]
    s =s/np.sum(s)
    #设置每种正态分布的均值、方差
    prt[:,0] =s.reshape(prt.shape[0])
    for i in range(le):
      prt[i,1] =130. +i*30
      prt[i,2] =10. +i*2
    
    print(prt)
    x =np.zeros([0], dtype = float)
    print(x)
    tsize =0
    #根据上面设置的数量、均值、方差生成数据
    for i in range(le -1):
      persize =int(np.floor(prt[i,0] *size))
      print(x.shape, (np.random.randn(persize) *prt[i, 2] +prt[i,1]).shape)
      x =np.hstack((x, np.random.randn(persize) *prt[i, 2] +prt[i,1]))
      tsize +=persize
  
    x =np.hstack((x, np.random.randn(size -tsize) *prt[le -1, 2] +prt[le -1,1]))
    np.random.shuffle(x)
    return x, prt
  
    prt =np.asarray([[0.6, 170, 20], [0.4, 180, 15]], dtype =float)
    a =np.random.randn(size)[:,np.newaxis]
    b =a*prt[0, 2] +prt[0,1]
    c =a*prt[1, 2] +prt[1,1]
    s =np.random.random(size)[:,np.newaxis]
    s[s <prt[1, 0]] =0
    s[s >=prt[1, 0]] =1
    probs =np.asarray([[0.1, 10, 50], [0.9, 20, 80]], dtype =float)
    return probs, (b*s +c*(1-s)).reshape(size//100, 100), prt

  def genNdimNormData(self, le, Ndim =2):
    """Generate ``Ndim``-dimensional samples from ``le`` normal components.

    Returns (x, prt): ``x`` is an (Ndim, 10000) matrix whose columns are
    shuffled samples; ``prt`` is [weights, means, sigmas, inverses,
    sqrt-determinants], one entry per component.
    """
    size =10000
    # Random mixture proportions for the le components (normalised to 1).
    s =np.random.random(le)
    s =s/np.sum(s)
    # Per-component parameters.
    # prt slots: [0] weights, [1] means, [2] sigma, [3] sigma^-1, [4] sqrt(det sigma)
    prt =[s.reshape(le,1),[],[],[],[]]
    for i in range(le):
      prt[1].append((np.random.random(Ndim)*20).reshape(Ndim,1))
      prt[2].append(np.random.random(Ndim *Ndim).reshape(Ndim, Ndim))
      t =np.random.random(Ndim)*2
      print(prt[2], 'prt[2]')
      # Symmetrise and scale the random matrix to act as a covariance.
      # NOTE(review): this construction is symmetric but not guaranteed
      # positive-definite — confirm the inverse/determinant below stay valid.
      for j in range(Ndim):
        for k in range(j+1):
          if j==k:
            prt[2][-1][j,k] =t[j] *t[j]
          else:
            prt[2][-1][k,j] *=t[k]*t[j]
            prt[2][-1][j,k] =prt[2][-1][k,j]
      # Pre-compute the inverse and sqrt-determinant of each covariance
      # for later density evaluations.
      #print(prt[i,2], prt[i,2].shape)
      prt[3].append(np.linalg.inv(prt[2][-1]))
      prt[4].append(np.sqrt(np.linalg.det(prt[2][-1])))
    for i in range(len(prt) -1):
      prt[i +1] =np.asarray(prt[i +1])
    prt[4] =prt[4].reshape(le,1)
    print(prt, 'prt')

    # Draw each component's share; the last component absorbs the
    # rounding remainder so exactly `size` samples are produced.
    # NOTE(review): sigma itself is used as the mixing matrix, so the
    # empirical covariance of the data is sigma @ sigma.T, not sigma —
    # verify this is intentional (a Cholesky factor would give sigma).
    x =np.empty([Ndim,0], dtype = float)
    tsize =0
    for i in range(le -1):
      persize =int(np.floor(prt[0][i] *size))
      print(prt[1][i].shape)
      x =np.hstack((x, np.matmul(prt[2][i], np.random.randn(Ndim,persize))+ prt[1][i]))
      tsize +=persize
    x =np.hstack((x,np.matmul(prt[2][-1], np.random.randn(Ndim,size -tsize)) +prt[1][-1])).T
    np.random.shuffle(x)
    x =x.T
    return x, prt

  def eStep(self, probs,observation):
    """E step: per-sample responsibilities under the current parameters.

    Args:
      probs: tuple whose first five entries are per-component tensors
        (alpha, mu, sigma, sigma^-1, sqrt(det sigma)).
      observation: (Ndim, Nsamples) tensor; samples are columns.

    Returns:
      tuple: (pqArray, wji, pqArrayta) — the tiled samples, the
      normalised responsibilities, and the raw per-component densities.
    """
    #E Step
    # Evaluate every sample's density under each component's parameters.
    # shapek = (k, 1, ..., 1): tile the observation once per component.
    shapek =tuple(np.hstack((np.asarray([len(probs[0])], dtype=int),np.ones(len(observation.shape), dtype=int))))
    pqArray =torch.tile(observation, shapek)
    a =pqArray -probs[1]
    b =torch.matmul(probs[3], a)
    # exp(-(x-mu)^T sigma^-1 (x-mu) / 2) per sample and component; abs()
    # guards against tiny negative quadratic forms from round-off.
    a =torch.exp(torch.abs(torch.matmul(torch.transpose(a,-1,-2).unsqueeze(-2),
      torch.transpose(b, -1, -2).unsqueeze(-1)).squeeze(-1).squeeze(-1)) /(-2.))
    # Gaussian normalisation constant (2*pi)^(d/2) * sqrt(det sigma).
    b =(((2*np.pi) **(observation.shape[0]/2.)) *probs[4])
    pqArrayta =(a /b)
    # Weight the raw densities by the mixture proportions.
    pqArrayt =pqArrayta*probs[0]

    # Normalise over components; samples with zero total density get a
    # dummy denominator of 1 to avoid division by zero.
    wt =torch.sum(pqArrayt, axis =0)
    loc =torch.where(wt ==0)
    wt[loc] =1
    wji =pqArrayt /wt
    # Orphan samples (zero density everywhere) are handed to the weakest
    # component so every sample keeps a responsibility row.
    if loc[0].shape[0] >1 :
      wj =torch.sum(wji, axis=1)
      pqArrayt[torch.argmin(wj)][loc] =1
      wji =pqArrayt /wt
    return pqArray, wji, pqArrayta

  def emNdimNormSingle(self, probs,observation):
    pqArray, wji, pqArrayta =self.eStep(probs,observation)
    #M step
    wj =torch.sum(wji, axis=1)
    #print(torch.where(torch.isinf(wji)))
    alpha =wj/wji.shape[1]
    mu =(torch.sum(wji.unsqueeze(-2) *pqArray, axis=(len(pqArray.shape) -1))
      /(wj.unsqueeze(-1))).unsqueeze(-1)
    a = torch.transpose(pqArray -mu, -1,-2).unsqueeze(-1)
    a =torch.matmul(a, torch.transpose(a, -1, -2))
    a =(wji.unsqueeze(-1).unsqueeze(-1)) *a
    sigma =torch.sum(a, axis =-3) /(wj.unsqueeze(-1).unsqueeze(-1))
    try:
      sigmaI =torch.linalg.inv(sigma)
    except Exception as e:
      print(sigma)
      print(e)
      exit()
    sigmaD =torch.sqrt(torch.linalg.det(sigma)).unsqueeze(-1)
    pret =(alpha.unsqueeze(-1), mu, sigma, sigmaI, sigmaD, probs[5])
    return pret
  
  def emNdimNormSingleGD(self, probs,observation):
    #E Step
    #求出每个样本点在probs参数下的概率值
    shapek =tuple(np.hstack((np.asarray([len(probs[0])], dtype=int),np.ones(len(observation.shape), dtype=int))))
    #print(shapek)
    pqArray =torch.tile(observation, shapek)
    #print('pqArray.shape =', pqArray.shape, 'probs[1].shape =', probs[1].shape, probs[3].shape)
    a =torch.transpose(pqArray -probs[1], -1,-2)
    #print(a.unsqueeze(-2).shape, probs[3].unsqueeze(-3).shape)
    b =torch.matmul(torch.matmul(a.unsqueeze(-2), probs[3].unsqueeze(-3)), a.unsqueeze(-1)).squeeze(-1).squeeze(-1)
    #print(b.shape)
    a =torch.exp(torch.abs(b.squeeze(-1).squeeze(-1))/(-2.))
    b =(((2*np.pi) **(observation.shape[0]/2.)) *probs[4])
    #print(a, 'a', a.shape, b, 'b')
    #print('a.shape =', a.shape, 'b.shape =', b.shape, 'pb[0].shape =', probs[0].shape)
    pqArrayt =(a /b) *probs[0]
    loc = torch.where(torch.isinf(pqArrayt))
    #print(loc,'loc', loc[0].shape)
    pqArrayt[loc] =0
    #loc1 = torch.where(pqArrayt >1.)
    #print(loc1,'loc1', loc1[0].shape)
    #if loc1[0].shape[0] >0 :
    #  print(pqArrayt[loc1], 'pqArrayt[loc1]')
    #  print(probs[1].shape, probs[3].shape, probs[4].shape)
    #  np.savetxt('txt/observation.txt', observation.detach().cpu().numpy(), fmt="%f",delimiter=",")
    #  print(probs)
    #  exit()
    #print(pqArrayt,'  pqArrayt')
    #print(pqArrayt.shape)
    
    #M Step
    wt =torch.sum(pqArrayt, axis =0)
    if torch.isnan(wt).any():
      print('wt isnan')
    #print(wt, 'wt')
    wt[torch.where(wt ==0)] =1
    wji =(pqArrayt /wt)
    
    if torch.isnan(wji[0]).any():
      print('wji[0] isnan')
      loc1 = torch.where(torch.isnan(wji))
      print(loc1,'loc1', loc1[0].shape)
      print(probs, 'probs')
      print(wt[2343], 'wt', pqArray.shape, pqArrayt.shape, wji.shape)
      print(pqArray[0,:,2343])
      print(pqArrayt[:,2343])
      print(wji[:,2343])
      np.savetxt('txt/observation.txt', observation.detach().cpu().numpy(), fmt="%f",delimiter=",")
      np.savetxt('txt/pqArrayt.txt', pqArrayt.detach().cpu().numpy(), fmt="%f",delimiter=",")
      np.savetxt('txt/nan.txt', wji.detach().cpu().numpy(), fmt="%f",delimiter=",")
      np.savetxt('txt/wt.txt', wt.detach().cpu().numpy(), fmt="%f",delimiter=",")
      exit()
    if torch.isnan(wji[1]).any():
      print('wji[1] isnan')
    loc = torch.where(wji != wji)
    wji[ loc ] = 0
    
    #print(wji,'  wji', wji.shape)
    
    #M step
    wj =torch.sum(wji, axis=-1)
    #print(wj, 'wj')
    alpha =(wj/wji.shape[-1]).unsqueeze(-1)
    #print(alpha, '+++alpha++++', torch.sum(alpha))
    wji =wji.unsqueeze(-2)
    #print('wji.shape =',wji.shape, 'pqArray.shape =', pqArray.shape, 'wj.shape =',wj.shape)
    #print(pqArray.shape, probs[1].shape, probs[3].shape, wji.shape, wj.shape)
    #print('a.shape =', a.shape, 'probs[1].shape =', probs[1].shape, 'probs[2].shape =', probs[2].shape)
    muGD =(torch.sum(wji *torch.matmul(probs[3], (pqArray -probs[1])),
      axis=len(pqArray.shape) -1)/wj.unsqueeze(-1)).unsqueeze(-1)
    #print(muGD, 'muGD.shape =', muGD.shape)
    #print(probs[1])
    #print(wj, 'wj')
    #print(self.gmu, 'self.gmu', self.gmu.shape)
    self.gmu =self.beta *self.gmu +(1-self.beta) *muGD
    #print(self.gmu, 'self.gmu', self.gmu.shape)
    mu =probs[1] -self.gmu
    #print(probs[1])
    #print(mu, 'mu', mu.shape)

    #sigma
    a = torch.transpose(pqArray -mu, -1,-2).unsqueeze(-1)
    #print(a.shape, probs[3].unsqueeze(1).shape, probs[4].unsqueeze(-2).unsqueeze(-2).shape)
    #b =torch.matmul(torch.matmul(torch.matmul(
    #  torch.transpose(a, -1,-2), probs[3].unsqueeze(1)),
    #  probs[3].unsqueeze(1)), a).squeeze(-1).squeeze(-1) -probs[4] *probs[4]
    b =torch.matmul(probs[3].unsqueeze(1), a)
    b =torch.matmul(b, torch.transpose(b, -1,-2)) -(probs[4] *probs[4]).unsqueeze(-2).unsqueeze(-2)
    #print(b, 'b', b.shape)
    #print(wji.squeeze(-2).unsqueeze(-1).unsqueeze(-1), 'wji', wji.squeeze(-2).unsqueeze(-1).unsqueeze(-1).shape)
    #print(torch.sum(wji.squeeze(-2).unsqueeze(-1).unsqueeze(-1) *b, axis =1).shape)
    #print(b.shape, wji.squeeze(-2).unsqueeze(-1).unsqueeze(-1).shape, wj.unsqueeze(-1).shape)
    sigmaGD =torch.sum(wji.squeeze(-2).unsqueeze(-1).unsqueeze(-1) *b, axis =1)/wj.unsqueeze(-1)
    #print(sigmaGD, 'sigmaGD', sigmaGD.shape)
    #print(self.gsigma, 'self.gsigma', self.gsigma.shape)
    self.gsigma =self.beta *self.gsigma +(1 -self.beta) *sigmaGD
    #print(self.gsigma, 'self.gsigma', self.gsigma.shape)
    sigma =probs[2] -self.gmu
    sigmaI =torch.linalg.inv(sigma)
    #print(torch.matmul(sigma, sigmaI))
    #print(torch.linalg.det(sigma))
    sigmaD =torch.sqrt(torch.abs(torch.linalg.det(sigma))).unsqueeze(-1)
    pret =(alpha, mu, sigma, sigmaI, sigmaD)
    #print(pret, 'pret')
    return pret
    
    muGD =ff
    #print('a.shape =',a.shape)
    a =torch.matmul(a, torch.transpose(a, -1, -2))
    #print('a.shape =', a.shape, 'wji.shape =', wji.unsqueeze(-1).unsqueeze(-1).shape)
    a =wji.unsqueeze(-1).unsqueeze(-1) *a
    #print('a.shape =', a.shape, 'wj.shape =', wj.shape)
    #print(torch.sum(a, axis =-3))
    #print(wj)
    sigma =torch.sum(a, axis =-3)/wj.unsqueeze(-1).unsqueeze(-1)
    #print(sigma, '+++++sigma+++++', sigma.shape)
    sigmaI =torch.linalg.inv(sigma)
    #print(torch.matmul(sigma, sigmaI))
    sigmaD =torch.sqrt(torch.linalg.det(sigma)).unsqueeze(-1)
    #print(probs, 'probs[4]')
    pret =(alpha, mu, sigma, sigmaI, sigmaD)
    #print(pret, 'pret')
    return pret
  
  def emNormSingleTt(self, probs,observation):
    """
    Single EM iteration for a 1-D Gaussian mixture (thresholded variant).

    Arguments
    ------------
    probs: (k, 3) tensor of [weight, mean, std] rows per component
    observation: observed values (heights); float entries

    Returns
    ---------------
    updated (k, 3) tensor of [weight, mean, std]
    """
    #E step
    # Flatten a 2-D observation matrix into one long sample vector.
    if len(observation.shape) >1:
      observation =observation.reshape(observation.shape[0]*observation.shape[1])
    # Build a k*n sample matrix: k distributions, n samples.
    pqArray =torch.tile(observation, (probs.shape[0], 1))
    pa =probs[:,0].reshape(probs.shape[0],1)
    pu =probs[:,1].reshape(probs.shape[0],1)
    ps =probs[:,2].reshape(probs.shape[0],1)
    # Weighted Gaussian density of every sample under each component.
    pqArrayt =(torch.exp((pqArray -pu) *(pqArray -pu) /(-2 *ps *ps)) /(np.sqrt(2 *np.pi) *ps))*pa

    # Total density of each sample over the hidden component variable.
    wt =torch.sum(pqArrayt, axis =0)
    wji =pqArrayt /wt
    # NOTE(review): the thresholding below has no effect — wji is mutated
    # in place but then immediately recomputed from the unmodified
    # pqArrayt, discarding the change.  Presumably pqArrayt itself was
    # meant to be thresholded; confirm intent before "fixing".
    wji[wji <0.1] =0
    wt =torch.sum(pqArrayt, axis =0)
    wji =pqArrayt /wt

    #M step
    wj =torch.sum(wji, axis=1)
    # Marginal responsibilities give the new mixture weights.
    alpha =wj/observation.shape[0]

    # Maximum-likelihood estimate of each component's mean...
    mu =torch.sum(wji *pqArray, axis=1) /wj
    mu0 =mu.reshape(probs.shape[0],1)
    # ...and of its standard deviation.
    sigma =torch.sqrt(torch.sum(wji *(pqArray -mu0) *(pqArray -mu0), axis =1) /wj)

    return torch.hstack((alpha.reshape(alpha.shape[0], 1),
      mu.reshape(alpha.shape[0], 1), sigma.reshape(alpha.shape[0], 1)))
  
  def emNormSingle(self, probs,observation):
    """
    EM算法的单次迭代
    Arguments
    ------------
    priors:每组身高期望与方差列表
    observation:观察值列表（身高列表），每元素取值为float
   
    Returns
    ---------------
    newProbs:更新后可能身高的列表
    """#"
    #print('\n%d: \n'%sys._getframe().f_lineno, probs, 'probs ', probs.shape)
    #print('observations ', observations.shape)
    #records = np.zeros([2,0], dtype = float)
  
    #E step
    if len(observation.shape) >1:
      observation =observation.reshape(observation.shape[0]*observation.shape[1])
    #生成k*n的样本矩阵，k个分布，n个样本
    pqArray =torch.tile(observation, (probs.shape[0], 1))
    #print(pqArray.shape)
    pa =probs[:,0].reshape(probs.shape[0],1)
    pu =probs[:,1].reshape(probs.shape[0],1)
    ps =probs[:,2].reshape(probs.shape[0],1)
    #print(pu, 'pu')
    #print(ps, 'ps')
    #在k个分布下的每个样本概率
    pqArrayt =(torch.exp((pqArray -pu) *(pqArray -pu) /(-2 *ps *ps)) /(np.sqrt(2 *np.pi) *ps))*pa
  
    #求出包含隐变量的每个样本概率
    wt =torch.sum(pqArrayt, axis =0)
    #print(pqArray, 'pqArray')
    #print(wt, 'wt')
    #print(pqArray -wt, 'pqArray-wt')
    #print('wt.shape =', wt.shape)
    wji =pqArrayt /wt
  
    #M step
    wj =torch.sum(wji, axis=1)
    #利用边沿分布求出alpha
    alpha =wj/observation.shape[0]
    pqArray =torch.tile(observation, (probs.shape[0], 1))
  
    #利用最大似然结果，求出均值
    mu =torch.sum(wji *pqArray, axis=1) /wj
    #mu0 =probs[:,1].reshape(probs.shape[0],1)
    mu0 =mu.reshape(probs.shape[0],1)
    #sigma =torch.sqrt(torch.sum(wji *(pqArray -mu0) *(pqArray -mu0), axis =1)
    #  /observation.shape[0] /probs[:,0])
    sigma =torch.sqrt(torch.sum(wji *(pqArray -mu0) *(pqArray -mu0), axis =1) /wj)
    
    return torch.hstack((alpha.reshape(alpha.shape[0], 1),
      mu.reshape(alpha.shape[0], 1), sigma.reshape(alpha.shape[0], 1)))
  
  def emBinomSingel(self, probs,observations):
    """
    二项分布下EM算法的单次迭代
    Arguments
    ------------
    priors:每枚硬币正面概率的列表
    observation:观察值列表，每元素取值为0或1
   
    Returns
    ---------------
    newProbs:更新后每枚硬币正面概率的列表
    """#"
    counts = torch.zeros([len(probs),2], dtype = float)
    probs =torch.asarray(probs).reshape(len(probs),1)
    #E step
    for observation in observations:
      #每一个观察值各种可能的概率。
      #计算每一个1和0的概率在各个正面概率条件下的概率
      #print(observation)
      pqArray =torch.tile(observation, (len(probs), 1))
      #print('pqArray', pqArray.shape)
      pqArrayt =(2*pqArray -1)*probs +(1 -pqArray)
      #print(pqArrayt)
      #print(probs)
      
      #每一行乘（为保证精度再乘以10^n,n=长度一半）。求得有各种概率条件下求得该组观察值组合的概率
      ps =torch.exp(torch.sum(torch.log(pqArrayt), axis =1)+(len(observation)//2)*torch.log(10))
      #该组观察值在不同概率下的占比
      ps =(ps /torch.sum(ps)).reshape(len(ps), 1)
      #print(ps.reshape(1, len(ps)), 'ps', torch.sum(ps))
      #print(pqArray)
      #print(pqArray*ps)
      #每一个观察值都乘以与之相应的概率，得到该观察值对每种可能概率的贡献
      counts[:,0] +=torch.sum(pqArray*ps, axis =1)
      counts[:,1] -=torch.sum((pqArray -1)*(ps), axis =1)
      #pqArrayt =(2*pqArray -1)*ps +(1 -pqArray)
      
      #print()
      
    #M step
    #print(counts, counts.sum())
    newProbs=counts[:,0] /torch.sum(counts, axis=1)
    return newProbs
   
  def em01_single(self, priors,observations):
    """
    EM算法的单次迭代
    Arguments
    ------------
    priors:每枚硬币正面概率的列表
    observation:每元素取值为0或1
   
    Returns
    ---------------
    new_priors:更新后每枚硬币正面概率的列表
    """#"
    probs = priors
    counts = torch.zeros([len(priors),2], dtype = float)
    contributions =torch.zeros(len(priors), dtype = float)
    weights =torch.zeros(len(priors), dtype = float)
    #E step
    for observation in observations:
      len_observation = len(observation)
      num_heads = observation.sum()           #正面次数
      num_tails = len_observation -num_heads  #反面次数
      #二项分布求解公式
      for i in range(len(priors)):
        #每次正面概率为thetas[i]的二项分布中，len_observation次试验，num_heads次正面的概率
        contributions[i] =stats.binom.pmf(num_heads,len_observation,probs[i])
   
      cons =contributions.sum()
      for i in range(len(priors)):
        weights[i] =contributions[i] /cons
      #更新在当前参数下A，B硬币产生的正反面次数
      for i in range(len(priors)):
        counts[i,0] +=weights[i] *num_heads
        counts[i,1] +=weights[i] *num_tails
   
    # M step
    #counts.sum()=1000,
    #print('counts.sum()=%f'%counts.sum())
    newProbs=torch.zeros(len(priors), dtype = float)
    for i in range(len(priors)):
      newProbs[i] =counts[i,0] / (counts[i,0] +counts[i,1])
    return newProbs
  
  #def forward(self, im_q, im_k):
  #def em(self, observations,prior,tol = 1e-6,iterations=1000000):
  def updateprior(self, prior, new_prior):
    beta =0.9
    prt =[[],[],[],[],[]]
    prt[0] =(beta *prior[0] +(1 -beta)*new_prior[0])
    prt[1] =(beta *prior[1] +(1 -beta)*new_prior[1])
    prt[2] =(beta *prior[2] +(1 -beta)*new_prior[2])
    
    prt[3] =torch.linalg.inv(prt[2])
    prt[4] =torch.sqrt(torch.linalg.det(prt[2])).unsqueeze(-1)
    prt =(prt[0], prt[1], prt[2], prt[3], prt[4])
    #print(prt)
    return prt
    
  def forward(self, observations,prior, mu =None, tol = 1e-6,iterations=50000, sFunc=None):
    """
    EM algorithm driver: iterate single EM steps until convergence.

    :param observations: observed data
    :param prior: initial model parameters (tuple of tensors)
    :param mu: optional list collecting the mean trajectory per iteration
    :param tol: change threshold below which iteration stops
    :param iterations: maximum number of iterations
    :param sFunc: single-step function; defaults to self.emNdimNormSingle
    :return: (locally optimal parameters, iteration count, mu history)
    """
    iteration = 0;
    while iteration < iterations:
      # One EM step via the pluggable single-iteration function.
      if sFunc ==None:
        new_prior =self.emNdimNormSingle(prior,observations)
      else:
        new_prior =sFunc(prior,observations)
      # If any parameter went NaN, dump the data and keep the old prior
      # (which makes delta_change zero and ends the loop below).
      for item in new_prior:
        if torch.isnan(item).any():
          fi =open("data.txt", "a+")
          st =str(observations)
          fi.write(st)
          fi.flush()
          fi.close()
          print('some value is nan')
          new_prior =prior
          break

      # Record the mean trajectory when the caller passed a history list.
      if mu !=None :
        if torch.cuda.is_available():
          mu.append(new_prior[1].squeeze(-1).detach().cpu().numpy())
        else:
          mu.append(new_prior[1].squeeze(-1).numpy())
      # Total Euclidean change over weights, means and covariances.
      delta_change =0
      for i in range(3):
        nshape =1
        for item in prior[i].shape:
          nshape *=item
        delta_change +=torch.sqrt(torch.dot(
          (prior[i] -new_prior[i]).reshape(nshape),
          (prior[i] -new_prior[i]).reshape(nshape)
        ))
      if delta_change >=tol:
        #prior = self.updateprior(prior, new_prior)
        prior =new_prior
        iteration +=1
      else:
        break;
      # Periodic progress report.
      if iteration%10000 ==0:
        print([new_prior,iteration], '行号为%d'%sys._getframe().f_lineno)
    return (new_prior,iteration, mu)

  def estimateCluster1(self, observation, prt, mu =None, tol = 1e-6, iterations=1000):
    """Run EM repeatedly while adapting the number of clusters.

    After each EM fit the cluster set is pruned (centres that are too
    close or own too few points are merged into their nearest neighbour)
    and grown (centres whose surroundings are underpopulated spawn a new
    centre), then EM is re-run until the cluster set stabilises.

    Args:
      observation: (Ndim, Nsamples) tensor of column samples.
      prt: parameter tuple (alpha, mu, sigma, sigma^-1, sqrt-det, flags).
      mu: optional list collecting the mean trajectory.
      tol: convergence threshold on successive parameter change.
      iterations: max EM iterations per forward() call.

    Returns:
      tuple: (final parameter tuple, total iteration count, mu history).
    """
    over =0
    td0 =self.forward(observation, prt, mu =mu, tol =tol, iterations =iterations)
    prt =td0[0]
    iteration =td0[1]
    mu =td0[2]
    print('enter estimateCluster loop=================================')
    print(prt)
    (prt0, prt1, prt2, prt3, prt4, prt5) =prt
    oldprt =prt
    while over ==0:
      nmprt =0
      print(prt0, prt1, prt2, prt3, prt4, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
      if prt0.shape[0] >1:
        # Too-many check: do two centres' point sets look like a single
        # distribution (centre distance small relative to the variance)?
        for i in range(prt0.shape[0]):
          #if prt5[i] >0:
            # Does more than the expected share of points fall inside the
            # half-probability radius around this centre?
            '''
            a =observation -prt1[i]
            b =torch.matmul(prt3[i], a)
            #print(a.shape)
            pqArrayta =torch.abs(torch.matmul(torch.transpose(a,-1,-2).unsqueeze(-2),
              torch.transpose(b, -1, -2).unsqueeze(-1)).squeeze(-1).squeeze(-1))
            '''
            a =observation -prt1[i]
            print(prt3[i].shape, a.shape)
            b =torch.matmul(prt3[i], a)
            print(torch.transpose(a, 0,1).unsqueeze(1).shape, torch.transpose(b, 0,1).unsqueeze(-1).shape)
            pqArrayta =torch.abs(torch.matmul(torch.transpose(a, 0,1).unsqueeze(1),
              torch.transpose(b, 0,1).unsqueeze(-1)).squeeze(-1).squeeze(-1))
            n1 =torch.where(pqArrayta <0.4549364231195727, 1., 0.,)
            print(torch.sum(n1), n1.shape[0], prt0[i])
            print(prt1[i], torch.sum(n1) /n1.shape[0] /prt0[i], '--------------------', i)
            if torch.sum(n1) /n1.shape[0] /prt0[i] > 0.39: #0.52:
              # Find the centre nearest to this one and merge the two.
              nmprt =1
              a =prt1 -prt1[i].unsqueeze(0)
              a =torch.matmul(torch.transpose(a, -1,-2), a).squeeze(-1).squeeze(-1)
              a =a *torch.where(prt5 <0, -1,1)
              sorted, indices =torch.sort(a)
              loc =torch.where(sorted ==0)
              if loc[0] ==sorted.shape[0] -1:
                indexf =sorted[loc[0]]
              else:
                indexf =sorted[loc[0] +1]
              indexf =torch.where(a ==indexf)[0]
              print(a, 'a', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
              print(i, indexf, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)

              if prt5[indexf] >=0:
                prt5[indexf] =0
                prt5[i] =-(indexf +1)
              else:
                prt5[i] =prt5[indexf]

        print(prt5, 'prt5', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
        # Resolve merge chains: negative prt5 entries point (negated,
        # 1-based) at the surviving centre that absorbs their weight.
        for i in range(prt5.shape[0]):
          if prt5[i] <0:
            indexf =prt5[i]
            print(indexf, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
            while indexf <0:
              print(indexf , prt5[-int(indexf) -1], 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
              if prt5[-int(indexf) -1] <0:
                indexf =prt5[-int(prt5[i]) -1]
              else:
                break;
            prt0[-int(indexf) -1] +=prt0[i]
        # Delete merged-away centres; iterate backwards so indices stay
        # valid while rows are removed.
        for i in range(prt0.shape[0] -1, -1, -1):
          print(i,'  +++++++++++++++++++++++++++')
          if prt5[i] <0:
            print(i, int(prt5[i]), prt0)
            prt0 =torch.vstack((prt0[0:i], prt0[i+1:]))
            prt1 =torch.vstack((prt1[0:i], prt1[i+1:]))
            prt2 =torch.vstack((prt2[0:i], prt2[i+1:]))
            prt3 =torch.vstack((prt3[0:i], prt3[i+1:]))
            prt4 =torch.vstack((prt4[0:i], prt4[i+1:]))
            prt5 =torch.hstack((prt5[0:i], prt5[i+1:]))
            print(prt0, prt1, prt2, prt3, prt4, prt5)
            print('+++++++++++++++++++++++++++')

        # Too-many check: a cluster with too few points lacks statistical
        # significance; merge it into its nearest neighbour.
        for i in range(prt5.shape[0]):
          if prt0[i] *observation.shape[1] <70:
            nmprt =1
            a =prt1 -prt1[i].unsqueeze(0)
            a =torch.matmul(torch.transpose(a, -1,-2), a).squeeze(-1).squeeze(-1)
            a =a *torch.where(prt5 <0, -1,1)
            sorted, indices =torch.sort(a)
            loc =torch.where(sorted ==0)
            if loc[0] ==sorted.shape[0] -1:
              indexf =sorted[loc[0]]
            else:
              indexf =sorted[loc[0] +1]
            indexf =torch.where(a ==indexf)[0]
            print(a, 'a', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
            print(i, indexf, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)

            if prt5[indexf] >=0:
              prt5[indexf] =0
              prt5[i] =-(indexf +1)
            else:
              prt5[i] =prt5[indexf]

        print(prt5, 'prt5', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
        for i in range(prt5.shape[0]):
          if prt5[i] <0:
            indexf =prt5[i]
            while indexf <0:
              if prt5[-int(indexf) -1] <0:
                indexf =prt5[-int(prt5[i]) -1]
              else:
                break;
            prt0[-int(indexf) -1] +=prt0[i]
        for i in range(prt0.shape[0] -1, -1, -1):
          print(i,'  +++++++++++++++++++++++++++')
          if prt5[i] <0:
            print(i, int(prt5[i]), prt0)
            prt0 =torch.vstack((prt0[0:i], prt0[i+1:]))
            prt1 =torch.vstack((prt1[0:i], prt1[i+1:]))
            prt2 =torch.vstack((prt2[0:i], prt2[i+1:]))
            prt3 =torch.vstack((prt3[0:i], prt3[i+1:]))
            prt4 =torch.vstack((prt4[0:i], prt4[i+1:]))
            prt5 =torch.hstack((prt5[0:i], prt5[i+1:]))
            print(prt0, prt1, prt2, prt3, prt4, prt5)
            print('+++++++++++++++++++++++++++')

      print(prt0, prt1, prt2, prt3, prt4, prt5)

      # Too-few check:
      # 1. the density around a centre is low (no points near it), or
      # 2. a centre may need to split (two close/overlapping distributions).
      # Standard normal CDF gives P(X<=0.6744897501960817)=0.75 and
      # 0.6744897501960817^2 = 0.4549364231195727 (the radius used below).
      nlprt=0
      print(observation, 'observation')
      for i in range(prt5.shape[0]):
        print(prt5, 'prt5', i, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
        if prt5[i] >0:
          a =observation -prt1[i]
          b =torch.matmul(prt3[i], a)
          pqArrayta =torch.abs(torch.matmul(torch.transpose(a,-1,-2).unsqueeze(-2),
            torch.transpose(b, -1, -2).unsqueeze(-1)).squeeze(-1).squeeze(-1))
          n1 =torch.where(pqArrayta <0.4549364231195727, 1., 0.,)
          print(n1, 'nl')
          print(n1.shape, torch.sum(n1), prt0[i])
          print(torch.sum(n1) /n1.shape[0] /prt0[i], '--------------------', i)
          if torch.sum(n1) /n1.shape[0] /prt0[i] < 0.48:
            # Spawn a new centre at the Mahalanobis-farthest sample,
            # giving it a sixth of this centre's weight.
            loc =torch.argmin(pqArrayta)
            prt0 =torch.vstack((prt0, prt0[i]/6.))
            prt0[i] -=prt0[-1]
            prt1 =torch.vstack((prt1,(observation[:,loc].reshape(prt1[i].unsqueeze(0).shape))))
            prt2 =torch.vstack((prt2,(prt2[i].unsqueeze(0))))
            prt3 =torch.vstack((prt3,(prt3[i].unsqueeze(0))))
            prt4 =torch.vstack((prt4,(prt4[i].unsqueeze(0))))
            prt5 =torch.hstack((prt5,(prt5[i].unsqueeze(0))))
            print('--------------------------------------------------------',i)
            print(prt1)
            print('--------------------------------------------------------',i)
            nlprt =1

      """#"
      for i in range(prt5.shape[0]):
        if prt5[i] >0:
          prt0 =torch.vstack((prt0, (prt0[i]/2.).unsqueeze(0)))
          prt0[i] -=prt0[-1]
          prt1 =torch.vstack((prt1,(prt1[i]).unsqueeze(0)))
          prt2 =torch.vstack((prt2,(prt2[i]).unsqueeze(0)))
          prt3 =torch.vstack((prt3,(prt3[i]).unsqueeze(0)))
          prt4 =torch.vstack((prt4,(prt4[i]).unsqueeze(0)))
          prt5 =torch.hstack((prt5,(prt5[i]).unsqueeze(0)))
          nlprt =1
      """#"
      prt =(prt0, prt1, prt2, prt3, prt4, prt5)
      print('estimateCluster loop one is over')
      print()
      if nlprt +nmprt ==0:
        break;
      else:
        # Flatten old and new parameters and measure their squared
        # distance so the loop can stop once clustering stabilises.
        dd2 =1
        if oldprt[0].shape ==prt[0].shape:
          tt =np.prod(oldprt[0].shape)
          dd0 =oldprt[0].reshape(tt)
          tt =np.prod(prt[0].shape)
          dd1 =prt[0].reshape(tt)
          tt =np.prod(oldprt[1].shape)
          dd0 =torch.hstack((dd0, oldprt[1].reshape(tt)))
          tt =np.prod(prt[1].shape)
          dd1 =torch.hstack((dd1, prt[1].reshape(tt)))
          tt =np.prod(oldprt[2].shape)
          dd0 =torch.hstack((dd0, oldprt[2].reshape(tt)))
          tt =np.prod(prt[2].shape)
          dd1 =torch.hstack((dd1, prt[2].reshape(tt)))
          dd2 =torch.sum((dd0 -dd1) *(dd0 -dd1))
        oldprt =prt

        td0 =self.forward(observation, prt, mu =mu, iterations =iterations)
        prt =td0[0]
        iteration +=td0[1]
        mu =td0[2]
        (prt0, prt1, prt2, prt3, prt4, prt5) =prt
        # Two successive clusterings barely differ: do one last merge
        # pass and exit the loop.
        if dd2 <tol:
          for i in range(prt5.shape[0]):
            if prt5[i] >0:
              nmprt =1
              a =prt1 -prt1[i].unsqueeze(0)
              a =torch.matmul(torch.transpose(a, -1,-2), a).squeeze(-1).squeeze(-1)
              a =a *torch.where(prt5 ==0, 1, -1)
              sorted, indices =torch.sort(a)
              loc =torch.where(sorted ==0)
              if loc[0] ==sorted.shape[0] -1:
                indexf =sorted[loc[0]]
              else:
                indexf =sorted[loc[0] +1]
              indexf =torch.where(a ==indexf)[0]
              print(a, 'a', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
              print(i, indexf, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)

              if prt5[indexf] >=0:
                prt5[indexf] =0
                prt5[i] =-(indexf +1)
              else:
                prt5[i] =prt5[indexf]

          print(prt5, 'prt5', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
          for i in range(prt5.shape[0]):
            if prt5[i] <0:
              indexf =prt5[i]
              while indexf <0:
                if prt5[-int(indexf) -1] <0:
                  indexf =prt5[-int(prt5[i]) -1]
                else:
                  break;
              prt0[-int(indexf) -1] +=prt0[i]
          for i in range(prt0.shape[0] -1, -1, -1):
            print(i,'  +++++++++++++++++++++++++++')
            if prt5[i] <0:
              print(i, int(prt5[i]), prt0)
              prt0 =torch.vstack((prt0[0:i], prt0[i+1:]))
              prt1 =torch.vstack((prt1[0:i], prt1[i+1:]))
              prt2 =torch.vstack((prt2[0:i], prt2[i+1:]))
              prt3 =torch.vstack((prt3[0:i], prt3[i+1:]))
              prt4 =torch.vstack((prt4[0:i], prt4[i+1:]))
              prt5 =torch.hstack((prt5[0:i], prt5[i+1:]))
              print(prt0, prt1, prt2, prt3, prt4, prt5)
              print('+++++++++++++++++++++++++++')


          prt =(prt0, prt1, prt2, prt3, prt4, prt5)
          mu.append(prt1.squeeze(-1).detach().cpu().numpy())
          break;

    return (prt, iteration, mu)

  def estimateCluster(self, observation, prt, mu =None, tol = 1e-6, iterations=1000):
    """Fit a mixture with EM (self.forward), then repeatedly merge / split
    cluster centers until the parameter tuple stabilizes.

    Args:
      observation: data tensor, indexed as observation[:, i] below, so it
        appears to be laid out (dim, n_samples) -- TODO confirm with caller.
      prt: 6-tuple (prt0..prt5). prt0 look like mixture weights, prt1 like
        means (k, dim, 1); prt3 is used in Mahalanobis-style products, so it
        is presumably an inverse covariance -- NOTE(review): verify against
        self.forward. prt5 is a per-cluster mark: >0 active, 0 "merge
        target", <0 "merged into cluster (-mark - 1)".
      mu: history list of mean estimates; appended to as fitting proceeds.
      tol: threshold on the squared difference between two consecutive
        parameter vectors, used to decide the outer loop has converged.
      iterations: max EM iterations forwarded to self.forward.

    Returns:
      (prt, iteration, mu): final parameter tuple, cumulative EM iteration
      count, and the mean-history list.
    """
    over =0
    # Initial EM fit before any merging/splitting.
    td0 =self.forward(observation, prt, mu =mu, tol =tol, iterations =iterations)
    prt =td0[0]
    iteration =td0[1]
    mu =td0[2]
    print('enter estimateCluster loop=================================')
    print(prt)
    (prt0, prt1, prt2, prt3, prt4, prt5) =prt
    oldprt =prt
    while over ==0:
      # nmprt: set when clusters were merged this pass; nlprt: when split.
      nmprt =0
      print(prt0, prt1, prt2, prt3, prt4, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
      if prt0.shape[0] >1:
        # Too-many-clusters check (center spacing smaller than the variance?
        # i.e. the point sets of two centers belong to the same distribution).
        shapek =tuple(np.hstack((np.asarray([len(prt0)], dtype=int),np.ones(len(observation.shape), dtype=int))))
        #print(shapek)
        pqArray =torch.tile(observation, shapek)
        a =observation -prt1
        b =torch.matmul(prt3, a)
        pqArrayta =torch.abs(torch.matmul(torch.transpose(a, 0,1).unsqueeze(1),
          torch.transpose(b, 0,1).unsqueeze(-1)).squeeze(-1).squeeze(-1))
        for i in range(prt0.shape[0]):
          #if prt5[i] >0:
            # More than half the points fall inside the half-probability
            # radius around this center (ratio threshold below).
            '''
            a =observation -prt1[i]
            b =torch.matmul(prt3[i], a)
            #print(a.shape)
            pqArrayta =torch.abs(torch.matmul(torch.transpose(a,-1,-2).unsqueeze(-2),
              torch.transpose(b, -1, -2).unsqueeze(-1)).squeeze(-1).squeeze(-1))
            '''
            # Mahalanobis-style squared distance of every observation to
            # center i -- presumably; depends on prt3 being the precision.
            a =observation -prt1[i]
            print(prt3[i].shape, a.shape)
            b =torch.matmul(prt3[i], a)
            print(torch.transpose(a, 0,1).unsqueeze(1).shape, torch.transpose(b, 0,1).unsqueeze(-1).shape)
            pqArrayta =torch.abs(torch.matmul(torch.transpose(a, 0,1).unsqueeze(1),
              torch.transpose(b, 0,1).unsqueeze(-1)).squeeze(-1).squeeze(-1))
            # 0.4549364231195727 = 0.6744897501960817^2, where
            # P(X <= 0.6744897501960817) = 0.75 for a standard normal.
            n1 =torch.where(pqArrayta <0.4549364231195727, 1., 0.,)
            print(torch.sum(n1), n1.shape[0], prt0[i])
            print(prt1[i], torch.sum(n1) /n1.shape[0] /prt0[i], '--------------------', i)
            if torch.sum(n1) /n1.shape[0] /prt0[i] > 0.39: #0.52:  
              # Find the center nearest to the current one and merge the two.
              nmprt =1
              a =prt1 -prt1[i].unsqueeze(0)
              a =torch.matmul(torch.transpose(a, -1,-2), a).squeeze(-1).squeeze(-1)
              # Negate distances of already-merged clusters so sort pushes
              # them to the front and they are skipped as merge targets.
              a =a *torch.where(prt5 <0, -1,1)
              sorted, indices =torch.sort(a)
              loc =torch.where(sorted ==0)
              # Pick the smallest strictly-positive distance (the entry
              # after self-distance 0), unless 0 is already the last entry.
              if loc[0] ==sorted.shape[0] -1:
                indexf =sorted[loc[0]]
              else:
                indexf =sorted[loc[0] +1]
              indexf =torch.where(a ==indexf)[0]
              print(a, 'a', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
              print(i, indexf, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)

              # Mark target as merge root (0) and this cluster as merged
              # into it (negative index encoding: -(target + 1)).
              if prt5[indexf] >=0:
                prt5[indexf] =0
                prt5[i] =-(indexf +1)
              else:
                prt5[i] =prt5[indexf]
            
        print(prt5, 'prt5', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
        # Resolve merge chains: follow negative marks to the final root and
        # accumulate the merged cluster's weight there.
        for i in range(prt5.shape[0]):
          if prt5[i] <0:
            indexf =prt5[i]
            print(indexf, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
            while indexf <0:
              print(indexf , prt5[-int(indexf) -1], 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
              if prt5[-int(indexf) -1] <0:
                indexf =prt5[-int(prt5[i]) -1]
              else:
                break;
            prt0[-int(indexf) -1] +=prt0[i]
        # Drop merged (negative-marked) rows; iterate backwards so earlier
        # indices stay valid while rows are removed.
        for i in range(prt0.shape[0] -1, -1, -1):
          print(i,'  +++++++++++++++++++++++++++')
          if prt5[i] <0:
            print(i, int(prt5[i]), prt0)
            prt0 =torch.vstack((prt0[0:i], prt0[i+1:]))
            prt1 =torch.vstack((prt1[0:i], prt1[i+1:]))
            prt2 =torch.vstack((prt2[0:i], prt2[i+1:]))
            prt3 =torch.vstack((prt3[0:i], prt3[i+1:]))
            prt4 =torch.vstack((prt4[0:i], prt4[i+1:]))
            prt5 =torch.hstack((prt5[0:i], prt5[i+1:]))
            print(prt0, prt1, prt2, prt3, prt4, prt5)
            print('+++++++++++++++++++++++++++')
          
        # Too-many-clusters check: too few points to be statistically
        # meaningful -- merge the cluster into its nearest neighbor.
        for i in range(prt5.shape[0]):
          #print(prt0.shape, observation.shape)
          if prt0[i] *observation.shape[1] <70:
            nmprt =1
            a =prt1 -prt1[i].unsqueeze(0)
            a =torch.matmul(torch.transpose(a, -1,-2), a).squeeze(-1).squeeze(-1)
            a =a *torch.where(prt5 <0, -1,1)
            sorted, indices =torch.sort(a)
            loc =torch.where(sorted ==0)
            if loc[0] ==sorted.shape[0] -1:
              indexf =sorted[loc[0]]
            else:
              indexf =sorted[loc[0] +1]
            indexf =torch.where(a ==indexf)[0]
            print(a, 'a', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
            print(i, indexf, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)

            if prt5[indexf] >=0:
              prt5[indexf] =0
              prt5[i] =-(indexf +1)
            else:
              prt5[i] =prt5[indexf]

        print(prt5, 'prt5', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
        # Same merge-chain resolution + row removal as above, for the
        # low-population merges just marked.
        for i in range(prt5.shape[0]):
          if prt5[i] <0:
            indexf =prt5[i]
            while indexf <0:
              if prt5[-int(indexf) -1] <0:
                indexf =prt5[-int(prt5[i]) -1]
              else:
                break;
            prt0[-int(indexf) -1] +=prt0[i]
        for i in range(prt0.shape[0] -1, -1, -1):
          print(i,'  +++++++++++++++++++++++++++')
          if prt5[i] <0:
            print(i, int(prt5[i]), prt0)
            prt0 =torch.vstack((prt0[0:i], prt0[i+1:]))
            prt1 =torch.vstack((prt1[0:i], prt1[i+1:]))
            prt2 =torch.vstack((prt2[0:i], prt2[i+1:]))
            prt3 =torch.vstack((prt3[0:i], prt3[i+1:]))
            prt4 =torch.vstack((prt4[0:i], prt4[i+1:]))
            prt5 =torch.hstack((prt5[0:i], prt5[i+1:]))
            print(prt0, prt1, prt2, prt3, prt4, prt5)
            print('+++++++++++++++++++++++++++')
          
      print(prt0, prt1, prt2, prt3, prt4, prt5)
      
      # Too-few-clusters check:
      # 1. points around a center all have low probability (no points near it)
      # 2. more centers may be needed (two distributions very close or
      #    even overlapping in the middle)
      # Uses the standard normal CDF: P(X<=0.6744897501960817)=0.75
      # 0.6744897501960817*0.6744897501960817 =0.4549364231195727
      nlprt=0
      print(observation, 'observation')
      #print(prt, 'prt')
      for i in range(prt5.shape[0]):
        print(prt5, 'prt5', i, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
        if prt5[i] >0:
          a =observation -prt1[i]
          b =torch.matmul(prt3[i], a)
          #print(a.shape)
          pqArrayta =torch.abs(torch.matmul(torch.transpose(a,-1,-2).unsqueeze(-2),
            torch.transpose(b, -1, -2).unsqueeze(-1)).squeeze(-1).squeeze(-1))
          n1 =torch.where(pqArrayta <0.4549364231195727, 1., 0.,)
          print(n1, 'nl')
          print(n1.shape, torch.sum(n1), prt0[i])
          print(torch.sum(n1) /n1.shape[0] /prt0[i], '--------------------', i)
          if torch.sum(n1) /n1.shape[0] /prt0[i] < 0.48:
            # Split: spawn a new center at the observation closest to this
            # one, give it 1/6 of the weight, copy the other parameters.
            loc =torch.argmin(pqArrayta)
            prt0 =torch.vstack((prt0, prt0[i]/6.))
            prt0[i] -=prt0[-1]
            prt1 =torch.vstack((prt1,(observation[:,loc].reshape(prt1[i].unsqueeze(0).shape))))
            #loc =torch.argmin(item)
            #print(prt1[i].shape, observation[:,loc].shape)
            #prt1[i] =observation[:,loc].reshape(prt1[i].shape)
            prt2 =torch.vstack((prt2,(prt2[i].unsqueeze(0))))
            prt3 =torch.vstack((prt3,(prt3[i].unsqueeze(0))))
            prt4 =torch.vstack((prt4,(prt4[i].unsqueeze(0))))
            prt5 =torch.hstack((prt5,(prt5[i].unsqueeze(0))))
            print('--------------------------------------------------------',i)
            print(prt1)
            print('--------------------------------------------------------',i)
            nlprt =1
            #prtn =(prt0, prt1, prt2, prt3, prt4)
            #print(prt)

      """#"
      for i in range(prt5.shape[0]):
        if prt5[i] >0:
          prt0 =torch.vstack((prt0, (prt0[i]/2.).unsqueeze(0)))
          prt0[i] -=prt0[-1]
          prt1 =torch.vstack((prt1,(prt1[i]).unsqueeze(0)))
          prt2 =torch.vstack((prt2,(prt2[i]).unsqueeze(0)))
          prt3 =torch.vstack((prt3,(prt3[i]).unsqueeze(0)))
          prt4 =torch.vstack((prt4,(prt4[i]).unsqueeze(0)))
          prt5 =torch.hstack((prt5,(prt5[i]).unsqueeze(0)))
          nlprt =1
      """#"
      prt =(prt0, prt1, prt2, prt3, prt4, prt5)
      print('estimateCluster loop one is over')
      print()
      #print(prt)
      # No merges and no splits this pass: the clustering is stable -- stop.
      if nlprt +nmprt ==0:
        break;
      else:
        # Measure how much (weights, means, covariances) moved since the
        # previous pass; only comparable when the cluster count is unchanged.
        dd2 =1
        if oldprt[0].shape ==prt[0].shape:
          tt =np.prod(oldprt[0].shape)
          dd0 =oldprt[0].reshape(tt)
          tt =np.prod(prt[0].shape)
          dd1 =prt[0].reshape(tt)
          tt =np.prod(oldprt[1].shape)
          dd0 =torch.hstack((dd0, oldprt[1].reshape(tt)))
          tt =np.prod(prt[1].shape)
          dd1 =torch.hstack((dd1, prt[1].reshape(tt)))
          tt =np.prod(oldprt[2].shape)
          dd0 =torch.hstack((dd0, oldprt[2].reshape(tt)))
          tt =np.prod(prt[2].shape)
          dd1 =torch.hstack((dd1, prt[2].reshape(tt)))
          dd2 =torch.sum((dd0 -dd1) *(dd0 -dd1))
          #print(dd0)
          #print(dd1)
          #print(dd0 -dd1, 'dd0 -dd1')
          #print(dd2, 'dd2', dd2.shape, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
        oldprt =prt
        
        # Re-run EM on the adjusted cluster set.
        td0 =self.forward(observation, prt, mu =mu, iterations =iterations)
        prt =td0[0]
        iteration +=td0[1]
        mu =td0[2]
        #print(prt)
        (prt0, prt1, prt2, prt3, prt4, prt5) =prt
        # If two consecutive fits differ little, the loop can end: do one
        # final merge of every remaining cluster into its nearest neighbor,
        # then return.
        if dd2 <tol:
          for i in range(prt5.shape[0]):
            #print(prt0.shape, observation.shape)
            if prt5[i] >0:
              nmprt =1
              a =prt1 -prt1[i].unsqueeze(0)
              a =torch.matmul(torch.transpose(a, -1,-2), a).squeeze(-1).squeeze(-1)
              # NOTE(review): this pass flips sign for prt5 != 0 (vs prt5 < 0
              # earlier) -- presumably intentional for the final merge; confirm.
              a =a *torch.where(prt5 ==0, 1, -1)
              sorted, indices =torch.sort(a)
              loc =torch.where(sorted ==0)
              if loc[0] ==sorted.shape[0] -1:
                indexf =sorted[loc[0]]
              else:
                indexf =sorted[loc[0] +1]
              indexf =torch.where(a ==indexf)[0]
              print(a, 'a', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
              print(i, indexf, prt5, 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
          
              if prt5[indexf] >=0:
                prt5[indexf] =0
                prt5[i] =-(indexf +1)
              else:
                prt5[i] =prt5[indexf]
          
          print(prt5, 'prt5', 'line %d in '%sys._getframe().f_lineno, sys._getframe().f_code.co_name)
          for i in range(prt5.shape[0]):
            if prt5[i] <0:
              indexf =prt5[i]
              while indexf <0:
                if prt5[-int(indexf) -1] <0:
                  indexf =prt5[-int(prt5[i]) -1]
                else:
                  break;
              prt0[-int(indexf) -1] +=prt0[i]
          for i in range(prt0.shape[0] -1, -1, -1):
            print(i,'  +++++++++++++++++++++++++++')
            if prt5[i] <0:
              print(i, int(prt5[i]), prt0)
              prt0 =torch.vstack((prt0[0:i], prt0[i+1:]))
              prt1 =torch.vstack((prt1[0:i], prt1[i+1:]))
              prt2 =torch.vstack((prt2[0:i], prt2[i+1:]))
              prt3 =torch.vstack((prt3[0:i], prt3[i+1:]))
              prt4 =torch.vstack((prt4[0:i], prt4[i+1:]))
              prt5 =torch.hstack((prt5[0:i], prt5[i+1:]))
              print(prt0, prt1, prt2, prt3, prt4, prt5)
              print('+++++++++++++++++++++++++++')


          prt =(prt0, prt1, prt2, prt3, prt4, prt5)
          #print(mu[-2:], 'mu')
          #print(prt1.squeeze(-1).detach().cpu().numpy())
          # Record the final means in the history before returning.
          if torch.cuda.is_available():
            mu.append(prt1.squeeze(-1).detach().cpu().numpy())
          else:
            mu.append(prt1.squeeze(-1).numpy())
          #print(mu[-3:], 'mu')
          break;

    return (prt, iteration, mu)

def parsData(args):
  """Extract the numeric tokens from ``args.data`` as a float array.

  Scans ``args.data`` character by character, accumulating runs of digits
  and '.' and converting each completed run to ``float``.

  Args:
    args: parsed argparse namespace; ``args.data`` must be an iterable of
      characters (a string). NOTE(review): ``parseArgument`` declares
      ``data`` with ``type=int``, which would make this raise TypeError --
      confirm which parser variant feeds this function.

  Returns:
    ``np.ndarray`` of the parsed floats, in order of appearance.
  """
  dlist =[]
  tstr =''
  for item in args.data:
    if item.isdigit() or item =='.':
      tstr +=item
    elif tstr !='':
      dlist.append(float(tstr))
      tstr =''
  # Bug fix: a number at the very end of the string was previously dropped,
  # because tokens were only flushed when a non-numeric character followed.
  if tstr !='':
    dlist.append(float(tstr))

  return np.asarray(dlist)

def drawResult(le, Ndim, b, prt, td, u):
  """Plot observations, cluster means, and the mean-estimate history.

  Args:
    le: number of clusters (unused here; kept for caller compatibility).
    Ndim: data dimensionality; only 2 and 3 are plotted.
    b: observation array laid out (Ndim, n_samples).
    prt: ground-truth parameter tuple; prt[1] holds means shaped (k, Ndim, 1).
    td: fitted means, shaped (k, Ndim, 1).
    u: list of mean-estimate snapshots, each convertible to (k, Ndim).

  Side effects: opens a blocking matplotlib window (plt.show()).
  """
  fig = plt.figure()
  if Ndim ==2:
    ax = fig.add_subplot(111)
    ax.scatter(b[0,:], b[1,:], marker='.')
    ax.scatter(prt[1][:,0,0], prt[1][:,1,0], marker='+')
    # First (red squares) and last (black octagons) mean snapshots.
    ut =np.asarray(u[0])
    ax.scatter(ut[:,0], ut[:,1], c='r', marker='s')
    ut =np.asarray(u[-1])
    ax.scatter(ut[:,0], ut[:,1], c='k', marker='8', alpha =0.3)
  elif Ndim ==3:
    ax = fig.add_subplot(111, projection ='3d')
    ax.scatter(b[0,:], b[1,:], b[2,:], marker='.')
    ax.scatter(prt[1][:,0,0], prt[1][:,1,0], prt[1][:,2,0], marker='+')
    # Bug fix: this line used td[i,...] with an undefined `i` (NameError);
    # plot all fitted means, matching the commented-out 2D variant.
    ax.scatter(td[:,0,0], td[:,1,0], td[:,2,0], c='r', marker='*')
    ut =np.asarray(u[0])
    ax.scatter(ut[:,0], ut[:,1], ut[:,2], c='r', marker='s')
    ut =np.asarray(u[-1])
    ax.scatter(ut[:,0], ut[:,1], ut[:,2], c='k', marker='8')
    for item in u:
      ut =np.asarray(item)
      # Bug fix: the z coordinate was ut[:,1] (a duplicate of y); use ut[:,2].
      ax.plot(ut[:,0], ut[:,1], ut[:,2], c='g')

  plt.show()

def testAlgo():
  """Driver: repeatedly generate synthetic mixture data, run the EM-based
  cluster estimator, dump results to disk, and plot each run.

  Side effects: writes ``datas.txt`` plus ``outfiled*``/``outfilep*``/
  ``outfilel*`` numpy dumps in the CWD, and opens a blocking matplotlib
  window per iteration.
  """
  import warnings  # bug fix: `warnings` was used below but never imported

  parser =parseArgument()
  args = parser.parse_args()

  print(args)
  if args.gpu is not None:
    warnings.warn('You have chosen a specific GPU. This will completely '
                  'disable data parallelism.')
  em =myEm(args)

  Ndim =int(args.dimension)
  le =int(args.data)
  nclass =int(args.ncluster)
  # `with` guarantees the log file is closed even if an iteration raises.
  with open("datas.txt", "w") as f:
    for ti in range(10000):
      # Generate a fresh synthetic dataset for this round.
      if args.category ==1:
        b, prt =em.genNdimNormData(nclass, Ndim)
        print(b.shape, prt, 'b.shape, prt')
      elif args.category ==0:
        b, prt =em.genBinomData(Ndim)

      # Initial guess: random normalized mixture weights, the first `le`
      # observations as means, identity covariances/precisions, unit marks.
      d0 =np.random.random(le)
      d0 =(d0/np.sum(d0)).reshape(le,1)
      dlist =(d0, np.transpose(b[:,0:le]).reshape(le,Ndim,1),
        np.ones([le,Ndim,Ndim]) *np.eye(Ndim).reshape(1,Ndim,Ndim),
        np.ones([le,Ndim,Ndim]) *np.eye(Ndim).reshape(1,Ndim,Ndim),
        np.ones([le,1])
      )
      markAdd =[j +1 for j in range(le)]

      if torch.cuda.is_available():
        device = torch.device("cuda")
        ngpus_per_node = torch.cuda.device_count()
        b =torch.Tensor(b).cuda()
        dlist =(torch.Tensor(dlist[0]).cuda(), torch.Tensor(dlist[1]).cuda(),
          torch.Tensor(dlist[2]).cuda(), torch.Tensor(dlist[3]).cuda(),
          torch.Tensor(dlist[4]).cuda(), torch.Tensor(markAdd).cuda())
        u =[dlist[1].detach().cpu().numpy()]
      else:
        device = torch.device("cpu")
        ngpus_per_node = 0  # bug fix: previously undefined on CPU-only runs
        b =torch.Tensor(b)
        dlist =(torch.Tensor(dlist[0]), torch.Tensor(dlist[1]),
          torch.Tensor(dlist[2]), torch.Tensor(dlist[3]),
          torch.Tensor(dlist[4]), torch.Tensor(markAdd))
        u =[dlist[1].numpy()]

      print(ti, "  Device", device, ngpus_per_node)
      print("b", b.shape)
      start0 =time.time()
      td0 =em.estimateCluster(b, dlist, mu=u, iterations=1000)
      start0 =time.time() -start0
      start1 =time.time()
      start1 =time.time() -start1
      print(td0[2][-1], 'mu', td0[1])
      print(prt,'prt')
      print(td0[0][0], ti)

      # Append this round's timings, fitted parameters, and ground truth
      # to the text log.
      st =f'{ti}\n[[{start0}, {start1}]],\n==='
      st +=str(td0[0])
      st +='===, --------------' +str(td0[1]) +'---------],\n==='
      st +=str(prt)
      st +=f'===\n\n'
      f.write(st)
      f.flush()

      if torch.cuda.is_available():
        c =b.detach().cpu().numpy()
        td=td0[0][1].detach().cpu().numpy()
      else:
        c =b.numpy()
        td=td0[0][1].numpy()
      # Binary dumps: data, ground-truth parameters, initial guess.
      np.save('outfiled' +str(ti), c)
      np.savez('outfilep' +str(ti), prt[0],prt[1], prt[2], prt[3], prt[4])
      st ='outfilel' +str(ti)
      if torch.cuda.is_available():
        np.savez(st, dlist[0].detach().cpu().numpy(),dlist[1].detach().cpu().numpy(),
          dlist[2].detach().cpu().numpy(), dlist[3].detach().cpu().numpy(), dlist[4].detach().cpu().numpy())
      else:
        np.savez(st, dlist[0].numpy(),dlist[1].numpy(),
          dlist[2].numpy(), dlist[3].numpy(), dlist[4].numpy())

      u =td0[2]
      drawResult(le, Ndim, c, prt, td, u)


if __name__=='__main__':
  # Log the start timestamp, seed NumPy's RNG from the wall clock so every
  # run draws fresh synthetic data, then run the EM demo driver.
  start_ts = time.time()
  print(start_ts)
  np.random.seed(int(time.time()))

  testAlgo()
