### functions  ####
odgmm = function(X,n_max_comp,debug=FALSE){
    # Overdispersed Gaussian mixture model: one dropout component fixed at
    # (mu0, sigma0), up to n_max_comp-1 signal components, plus one uniform
    # outlier component. Fitted by EM with random restarts, selected by BIC.
    # X: data, a 1-dimensional vector
    # n_max_comp: maximum number of components
    # debug: if TRUE, print per-iteration progress and plot the logml trace
    # Returns a list: alpha, beta, ws, logml, bic, mus, sigmas, label.
    
    # perform overdispersed Gaussian mixture modeling on input data X, with maximum K components
    
    # functions for transforming parameters
    # Means are parameterized as increments: mu_k = mu0 + sum(alpha[1:k]);
    # variances as multiplicative factors: var_k = var0 * prod(beta[1:k]).
    # This keeps means ordered and variances non-decreasing relative to var0.
    alpha2mu = function(alpha){
        # global var mu0
        # cumulative sums turn increments into ordered component means
        return ( cumsum(c(mu0,alpha)) )
    }
    mu2alpha = function(mus){
        # inverse of alpha2mu (drops mu0): consecutive differences
        return ( diff(mus) )
    }
    beta2sigma = function(beta){
      # beta is for the variance
      # global var sigma0
      # sd_k = sigma0 * prod(sqrt(beta[1:k]))
      return( cumprod( c(sigma0,sqrt(beta)) ) )
    }
    beta2var = function(beta){
        # global var sigma0
        # var_k = var0 * prod(beta[1:k])
        return( cumprod(c(var0,beta)) )
    }
    sigma2beta = function(sigmas){
        # inverse of beta2sigma: squared ratios of consecutive sds
        return (exp(diff(log(sigmas)))^2)
    }
    var2beta = function(vars){
        # inverse of beta2var: ratios of consecutive variances
        return (exp(diff(log(vars))))
    }
    
    # functions for EM algorithms
    estep = function(X, alpha, beta, ws){
        # E-step: responsibility matrix Z (N x (K+2)) of each point for the
        # K+1 Gaussian components (column 1 = dropout component at mu0) and
        # the uniform outlier component (last column).
        mus = alpha2mu(alpha)
        sds = beta2sigma(beta)
        K = length(alpha)
        N = length(X)
        Z = matrix(0,nrow=N,ncol=K+2)
        XX = matrix(rep(X,K+1), nrow=N, ncol = K+1)
        # double transpose so that mus/sds recycle across components (columns)
        XX = t(dnorm(t(XX), mean = mus, sd = sds, log = TRUE))
        # dnorm can return +Inf log-density when sd is ~0 at an exact hit; mask it
        XX[XX==Inf]=0
        Z[,seq(K+1)] = XX + matrix( rep( log(ws[seq(K+1)]), each=N ), nrow=N ) 
        # uniform component: constant log density (global uni_log_density)
        Z[,K+2] = log(ws[K+2]) + uni_log_density
        
        if(any(is.na(Z))){
            print('Z contains NA')
        }
        
        # log-sum-exp normalization: subtract the row maxima (recycled down
        # columns) before exponentiating, then normalize each row to sum to 1
        maxz = apply(Z,1,max)
        Z = exp(Z - maxz)
        return (Z/apply(Z,1,sum))
    }
    cal_alpha = function(X,Z,alpha,beta,j){
        # M-step update for the mean increment alpha[j].
        # Because means are cumulative, alpha[j] appears in components j..K;
        # the update is a responsibility/variance-weighted average over them.
        # Returns NA when no responsibility mass supports the update; otherwise
        # the result is clamped below at min_alpha.
        mus = alpha2mu(alpha)
        vars = beta2var(beta)
        
        # variance of some component might be 0, limit it to be at least min_variance
        vars[vars<min_variance] = min_variance
        
        K = length(alpha)
        N = length(X)
        XX = rep(0,N)
        ZZ = rep(0,N)
        for(k in seq(j,K))
        {
            # column/mean index k+1: column 1 belongs to the dropout component
            tmp = Z[,(k+1)]/vars[k+1]
            # (X - mus[k+1] + alpha[j]) removes every increment except alpha[j]
            XX = XX + (X - mus[k+1] + alpha[j])*tmp
            ZZ = ZZ + tmp
        }
        tsum = sum(ZZ)  # vars can be 0
        if(is.na(tsum) || tsum==0){
            return(NA)
        }else{
          # return (sum(XX)/tsum)
          # clamp below: keep each signal mean at least min_alpha above its predecessor
          ret_val = sum(XX)/tsum
          if(ret_val<min_alpha){
            return(min_alpha)
          }else{
            return(ret_val)
          }
            
        }
    }
    cal_beta = function(X,Z,alpha,beta,j){
        # M-step update for the variance factor beta[j], which multiplies the
        # variances of components j..K (cumulative parameterization).
        # Returns NA when no responsibility mass supports the update; otherwise
        # the result is clamped below at min_beta.
        mus = alpha2mu(alpha)
        vars = beta2var(beta)
        
        # variance of some component might be 0, limit it to be at least min_variance
        vars[vars<min_variance] = min_variance
        
        K = length(alpha)
        N = length(X)
        XX = rep(0,N)
        for(k in seq(j,K))
        {
            # beta[j]/vars[k+1] rescales the squared residual so the sum
            # isolates the contribution of beta[j] to each affected component
            XX = XX + Z[,(k+1)] * (X - mus[k+1])^2 * beta[j] / vars[k+1]
        }
        # total responsibility of components j..K (columns j+1..K+1)
        tsum = sum(Z[,seq(j+1,K+1)])
        if(is.na(tsum) || tsum==0){
            return(NA)
        }else{
          ret_val = sum(XX)/tsum
          if(ret_val<min_beta){
            return(min_beta)
          }else{
            return(ret_val)
          }
            
        }
    }
    cal_ws = function(Z){
        # Mixture weights: mean responsibility per component (column of Z).
        return(colMeans(Z))
    }
    mstep = function(X,Z,alpha,beta,ws){
        # M-step: recompute weights, mean increments and variance factors.
        # Each alpha[j]/beta[j] update is computed from the OLD alpha/beta
        # (passed unchanged into cal_alpha/cal_beta), so the descending loop
        # order introduces no dependency between the j updates.
        K = length(alpha)
        new_ws = cal_ws(Z)
        new_alpha = alpha
        new_beta = beta
        for(j in seq(K,1,-1))
        {
            new_alpha[j] = cal_alpha(X,Z,alpha,beta,j)
            new_beta[j] = cal_beta(X,Z,alpha,beta,j)
        }
        return(list(alpha=new_alpha,beta=new_beta,ws=new_ws))
    }
    exp_log_like = function(X, Z, alpha, beta, ws){
        # Expected complete-data log-likelihood E_Z[log p(X, Z | theta)]:
        # sum over points and components of responsibility * (log weight + log density).
        mus = alpha2mu(alpha)
        sds = beta2sigma(beta)
        K = length(alpha)
        N = length(X)
        
        XX = matrix(rep(X,K+1), nrow=N, ncol = K+1)
        
        # transpose trick recycles mus/sds over components (columns)
        tmpX = t(dnorm(t(XX), mean = mus, sd = sds, log = TRUE))
        # mask infinite log terms so that 0 * Inf cannot produce NaN in the sum
        tmpX[tmpX==Inf] = 0
        tmpX = tmpX + matrix( rep( log(ws[seq(K+1)]), each=N ), nrow=N) 
        # append the uniform outlier column (constant log density per point)
        tmpX = cbind(tmpX, rep(log(ws[K+2])+uni_log_density, N))
        tmpX[tmpX==-Inf] = 0
        return(sum(tmpX*Z))
    }
    elbo = function(X,Z,alpha,beta, ws){
        # Evidence lower bound = expected log-likelihood + entropy of Z.
        # 0 * log(0) terms are avoided by taking log only where Z != 0.
        LZ = Z
        LZ[Z!=0] = log(Z[Z!=0])
        entropy = -1 * Z * LZ
        
        lb = exp_log_like(X,Z,alpha,beta, ws)+sum(entropy)
        if(is.na(lb) || lb==-Inf){
            # exp_log_like(X,Z,alpha,beta, ws)
            print("lower bounder is na or -Inf.")
        }
        return (lb)
    }
    bic = function(X,Z,alpha,beta,ws){
        # Bayesian information criterion of the fitted mixture; the penalty
        # counts 3*K + 2 free parameters. The smaller the BIC, the better.
        n_obs = length(X)
        n_comp = length(alpha)
        penalty = (3*n_comp+2)*log(n_obs)
        return(penalty - 2*elbo(X,Z,alpha,beta,ws))
    }
    # package fitted parameters into the standard result list
    gen_res = function(alpha=NA,beta=NA,ws=NA,logml=NA,Z=NA,bic=NA){
        # Derives mus/sigmas from alpha/beta when available; every argument
        # defaults to NA so gen_res() yields an "empty" / failed-fit result.
        mus = NA
        sds = NA
        if(!any(is.na(alpha))){
            mus = alpha2mu(alpha)
        }
        if(!any(is.na(beta))){
            sds = beta2sigma(beta)
        }
        return(list(alpha=alpha,beta=beta,ws=ws,logml=logml,Z=Z,bic=bic,mus=mus,sigmas=sds))
    }
    em_optim0 = function(X, init_mus, init_sgs, init_ws, nround=200, debug=FALSE){
        # Run EM from a single initialization for up to nround iterations.
        # init_mus: component means including mu0; init_sgs: variances
        # including var0; init_ws: initial weights (length K+2).
        # Convergence: relative ELBO change below 1e-6.
        # Returns a gen_res list; on numerical failure, the last good state.
        alpha = mu2alpha(init_mus)
        beta = var2beta(init_sgs)
        ws = init_ws
        Z = estep(X, alpha, beta, ws)
        logml = -Inf
        logml_res = rep(NA,nround)
        # last successful state, returned if a later iteration produces NA
        curr_res = gen_res()
        for (i in seq(nround)){
            if(debug){
                print(paste0('iteration=',i,'  logml=',logml))
            }
            Z = estep(X, alpha, beta, ws)
            res = mstep(X,Z,alpha,beta,ws)
            
            if(any(is.na(res$alpha)) || any(is.na(res$beta)) || any(is.na(res$ws))){
                print(paste0('Inference failed after ',i,' iterations.'))
                return(curr_res)
                # return(gen_res())
            }
            logml_new = elbo(X, Z, res$alpha, res$beta, res$ws)
            logml_res[i] = logml_new
            alpha = res$alpha
            beta = res$beta
            ws = res$ws
            
            if(is.na(logml_new) || is.na(logml)){
                print(paste0('Inference failed after ',i,' iterations.'))
                return(curr_res)
                # return(gen_res())
            }
            if(logml_new==-Inf){
                print(logml_new)
                print(logml)
            }
            
            # relative-change convergence test on the ELBO
            if( abs(logml_new-logml) < abs(1e-6*logml) )
            {
                logml = logml_new
                break
            }
            logml = logml_new
            # NOTE(review): curr_res is refreshed only on non-converged
            # iterations, so on convergence the returned parameters are one
            # EM step behind the final logml — confirm this is intended
            curr_res = gen_res(alpha,beta,ws,logml,Z)
        }
        if(i==nround){
            print(paste0('Run all ',i,' iterations.',' logml=',logml))
        }else{
            print(paste0('Converge in ',i,' iterations.',' logml=',logml))
        }
        if(debug){
            # trace plot of the ELBO; early iterations skipped for scale
            idx = seq(3,nround)
            plot(idx,logml_res[idx])
        }
        # return(gen_res(alpha,beta,ws,logml,Z))
        return(curr_res)
    }
    disp_paras = function(alpha,beta,ws,logml, infostr=''){
        # Pretty-print fitted parameters to the console, both in the raw
        # (alpha/beta) and transformed (mus/vars) parameterizations.
        # infostr: optional header line prepended to the report.
        cat(paste( infostr,
        paste0('alpha=',sprintf("%.3f", alpha),collapse=" "),
        paste0('beta=',sprintf("%.3f", beta),collapse=" "),
        paste0('ws=',sprintf("%.3f", ws),collapse=" "),
        paste0('mus=',sprintf("%.3f", alpha2mu(alpha)),collapse=" "),
        paste0('vars=',sprintf("%.3f", beta2var(beta)),collapse=" "),
        paste0('logml=',logml),
        '','',
        sep="\n" ))
    }
    em_algo = function(X,K,debug=FALSE){
        # Fit the (K+2)-component model (dropout + K signal + uniform) with
        # n_trial random restarts; keep the restart with the best logml and
        # attach its BIC. Depends on globals mu0, sigma0, var0, min_beta.
        stopifnot(length(X)>0)
        if(length(X)==1){
            # single observation: assign it entirely to the first signal component
            return(gen_res(alpha = X, beta = min_beta, ws=c(0,1,0), Z= matrix(c(0,1,0),nrow=1)))
        }
        # depends on global mu0, sigma0
        if(var(X)<=var0 && K>1){
            # not enough spread in X to support more than one signal component
            return(gen_res())
        }else if(var(X)<=var0){
            # degenerate spread: single candidate mean; keep squ on the LOG
            # scale, consistent with the exp() applied when sampling init_sgs
            # (the original stored raw variances here, so exp() was wrong)
            xqu = mean(X)
            squ = log(c(var(X),var0))
        }else{
            # candidate means: 5%..100% quantiles; candidate log-variances:
            # an even grid between log(var0) and log(var(X))
            xqu = unname(quantile(X, probs = seq(0.05, 1, 0.05)))
            squ = seq(log(var0),log(var(X)),length.out=21)
        }
        
        n_trial = 20
        res_list = vector("list",n_trial)
        logml_arr = rep(NA,n_trial)
        for(i in seq(n_trial)){
            if(debug){
                print(paste0(i,'th trial. ntrial=',n_trial))
            }
            tmpw = runif(K+2)+1
            init_ws = tmpw/sum(tmpw)
            # sample by index: sample(x, K) on a length-1 numeric x would draw
            # from seq_len(x) instead of from x itself (classic sample() trap)
            init_mus = c(mu0, sort( xqu[sample.int(length(xqu), K)] ))
            sq_pool = squ[-1]
            init_sgs = c(var0, exp( sq_pool[sample.int(length(sq_pool), K, replace = TRUE)] ))
            res_list[[i]] = em_optim0(X, init_mus, init_sgs, init_ws, debug=FALSE)
            # extract the scalar logml; the original `[4]` produced a length-1
            # sublist, silently coercing logml_arr into a list
            logml_arr[i] = res_list[[i]]$logml
        }
        max_ind = which.max(logml_arr)
        
        if(length(max_ind)==0){
            # every trial failed (all logml NA): return first trial, infinite BIC
            res = res_list[[1]]
            res$bic = Inf
        }else{
            res = res_list[[max_ind]]
            res$bic = bic(X,res$Z,res$alpha,res$beta,res$ws)
        }
        
        return(res)
    }
    
    # main code for odgmm
    # initialization
    # mu0,sigma0
    mu0 = 0       # mean of the fixed dropout component
    sigma0 = 0.1  # sd of the fixed dropout component
    var0 = sigma0^2
    
    min_variance = 1e-6  # minimum variance allowed
    min_alpha = 3*sigma0 # minimum alpha allowed (min gap between component means)
    min_beta = 0.5  # minimum beta allowed
    
    # assign all 0 directly to the first component
    n_data_point = length(X)
    non_zero_inds = which(X!=0)
    zero_inds = which(X==0)
    non_zero_weight = length(non_zero_inds)/n_data_point
    zero_weight = 1-non_zero_weight
    
    # in case input data are all zeros
    if(length(non_zero_inds)==0){
      return(gen_res(alpha=min_alpha, beta = min_beta, ws=c(1,0,0), Z= matrix(c(1,0,0),nrow=1)))
    }
    
    # fit the mixture on the non-zero observations only
    X = X[non_zero_inds]
    
    if(n_max_comp<2){
      stop(paste0("n_max_comp=",n_max_comp," must at least be 2. One dropout component and one signal component."))
    }
    K = n_max_comp - 1
    
    # constant log-density used by the uniform outlier component
    uni_log_density = -log(max(X))
    
    # auxiliary function
    # weights of the middle components (drops first and last entries), renormalized
    norm_center = function(w) {x=w[2:(length(w)-1)]; return(x/sum(x))}
    
    # list(alpha,beta,ws,logml,Z)
    #curr_res = list(NA,NA,NA,NA,NA,NA)
    curr_res = gen_res()
    curr_bic = NA
    # backward model selection: fit K, K-1, ... signal components; keep a
    # candidate (flag=FALSE) if it is the first fit, if some middle component
    # is nearly empty, or if its BIC improves; otherwise stop shrinking
    for(i in seq(K,1,-1)){
        cat("\n")
        print(paste0('Model estimation with ',i+1,' components'))
        flag = TRUE
        res = em_algo(X,i,debug=debug)
        res_bic = res$bic
        print(paste0('Model estimation with ',i+1,' components  ',"res_bic=",res_bic))
        if(is.na(curr_bic)){
            # first candidate: always accept
            flag=FALSE
        }
        else if(length(res$ws)>1 && any(res$ws[2:(length(res$ws)-1)]<0.01)){
            # if some "real" (not dropout, uniform) component has less than 1% weight
            flag=FALSE
        }
        else if(res_bic-curr_bic < 0){
            # strictly better BIC: accept and keep shrinking
            flag=FALSE
        }
        if(!flag){
            curr_bic = res_bic
            curr_res = res
        }else{
            break
        }
        cat("curr_bic = ",curr_bic,"\n")
    }
    
    # add the non-zero data into the zero components
    # change weights
    # rescale weights to the full data and fold the exact zeros into component 1
    ws = non_zero_weight * curr_res$ws
    ws[1] = ws[1] + zero_weight
    curr_res$ws = ws
    # change label
    # hard labels: zeros -> component 1, others -> argmax responsibility
    lab_vec = rep(1,n_data_point)
    lab_vec[non_zero_inds] = apply(curr_res$Z, 1, which.max)
    curr_res$label = lab_vec
    # drop the (large) responsibility matrix from the returned result
    z_ind = which(names(curr_res) == "Z")
    curr_res = curr_res[-z_ind ]
    
    alpha = curr_res$alpha
    beta = curr_res$beta
    ws = curr_res$ws
    logml = curr_res$logml
    infostr = paste0('final model: ',length(alpha)+1,' components, bic=', curr_bic, ' ')
    disp_paras(alpha,beta,ws,logml,infostr)
    
    return(curr_res)
}

gmmpdf = function(x, mus, sigmas, ws, log=FALSE){
    # Density of a K-component Gaussian mixture evaluated at x.
    # x: evaluation points; mus/sigmas/ws: per-component means, sds, weights.
    # log: if TRUE, return the log density.
    # Returns a numeric vector the same length as x (all zeros when K == 0).
    K = length(mus)
    N = length(x)
    y = rep(0,N)
    # seq_along avoids the seq(K) == c(1, 0) trap when mus is empty,
    # which would index past the vectors and yield NA densities
    for(k in seq_along(mus)){
        y = y + ws[k] * dnorm(x,mus[k],sigmas[k])
    }
    if(log){
        return(log(y))
    }else{
        return (y)
    }
}
