#2013/02/12
#Author: Tomonori Oura
# Implementation for nested mixture model
#

calc_w <- function(d,z,class)
{
	# Build the initial sample-component indicator array w.
	# d: expression matrix (rows: genes, cols: samples)
	# z: gene-component indicator matrix (rows: genes, cols: components);
	#    column 1 is the null component and is skipped below.
	# class: clinical class per sample; only samples with class == 2 may
	#    be placed in the outlier sample component.
	# Returns array of dim c(n.samples, 2, n.components):
	#   w[s,1,k] == 1  -> sample s is non-outlier for component k
	#   w[s,2,k] == 1  -> sample s is outlier for component k
	# NOTE(review): kmeans() uses random starts, so repeated calls give
	# different initializations unless the caller sets the RNG seed.
	nc <- ncol(z)
	w_ini <- array(0,dim=c(ncol(d),2,nc))
	# Null component: every sample is non-outlier.
	w_ini[,1,1] <- 1
	for(c.ind in c(2:nc))
	{
#		c.ind.genes <- gene.components == c.ind
		c.ind.genes <- z[, c.ind]
		sample.exp <- d[c.ind.genes, ]
#		cat(
#			"dimension of sample.exp for "
#			,c.ind
#			,c("st","nd","rd","th")[pmin(4, c.ind)]
#			," class\n",sep="")
		#print(dim(sample.exp))
		if(is.vector(sample.exp))
		{
			# Component has exactly one gene: d[...] dropped to a vector,
			# so cluster the samples on that single gene's values.
			cat("calc_w(): vector!\n")
			cl.out <- kmeans(sample.exp, 2)
			mean1 <- mean(sample.exp[cl.out$cl==1])
			mean2 <- mean(sample.exp[cl.out$cl==2])
		}else{
			# Cluster samples (columns of d) into 2 groups.
			cl.out <- kmeans(t(sample.exp), 2)
			mean1 <- mean(sample.exp[, cl.out$cl==1])
			mean2 <- mean(sample.exp[, cl.out$cl==2])
		}
		#Assignment outlier sample component
		#outlier.class.ind <- ifelse(mean1 > mean2,1,2)
		# The cluster with larger absolute mean expression is the outlier one.
		outlier.class.ind <- ifelse(abs(mean1) > abs(mean2),1,2)
		high.ind <- cl.out$cl == outlier.class.ind
		#Divide samples based on each gene components using kmeans
		#Then assign outlier components to class with higher mean expression
		# with class 2 (cancer class)
		oe.ind <- class == 2 & high.ind
		#print(table(oe.ind))
		w_ini[!oe.ind, 1, c.ind] <- 1
		w_ini[ oe.ind, 2, c.ind] <- 1
	}#each class
	return(w_ini)
}# calc_w


doInitClass <- function(
	d
	,class#clinical class, constraints null component sample
	,n.class=4# Number of components: 1 null class + (nc - 1) non null class
	,size.limit=3#minimum number of members in gene component
)
{
	# Build initial gene/sample component assignments and parameters for
	# the EM algorithm, for each candidate component count.
	# d: expression matrix (genes x samples)
	# class: clinical class vector per sample (1 = control, 2 = cancer)
	# n.class: a single count (then n.class-1, n.class, n.class+1 are
	#          tried) or a vector of counts to try
	# Returns a list with one element per tried component count, each
	# list(nc, z, w, k, l, mu, var).
	call <- match.call()
	#Estimate initial value

	cat("## doInitClass()\n")
	
	if(length(n.class) > 1){
		ncs <- n.class
	}else if(length(n.class) == 1){
		ncs <- c(n.class - 1, n.class, n.class + 1)#Number of components
	}
	initResult <- NULL
	# One hierarchical clustering, cut at each candidate count.
	res.clust.arr <- byCluster2(x=d, ncs=ncs, method="cor")
	for(i in seq(ncs))
	{
		nc.this <- ncs[i]
		cat("*Number of components:",nc.this,"\n")
		
		#Divide DE genes into nc.this components
		#method <- c("euclidean")
		res.clust <- res.clust.arr[[i]]
		cat("#res.clust\n")
		print(table(res.clust$class))
		
		#assign non-DE gene component
		# The cluster whose between-clinical-class mean difference is
		# smallest is relabelled as component 1 (the null component).
		comp.temp <- res.clust$class
		diff <- rep(0, nc.this)
		for(c.ind in c(1:nc.this)){
			this.c.ind <- comp.temp == c.ind
			diff[c.ind]  <- abs(mean(d[this.c.ind, class==1]) - mean(d[this.c.ind, class==2]))
		}
		nonDE.ind <- which.min(diff)
		# Swap labels: non-DE cluster <-> cluster 1.
		gene.components <- comp.temp
		gene.components[comp.temp == nonDE.ind] <- 1
		gene.components[comp.temp == 1] <- nonDE.ind
		z_ini <- outer(gene.components, seq(nc.this), "==")
		#z_ini is created

		cat("z_ini:\n")
		print(colSums(z_ini))
		#Calculate w_ini
		w_ini <- calc_w(d=d, z=z_ini, class=class)
		cat("w_ini:\n")
		print(t(colSums(w_ini)))
		#w_ini is created
		#Calculate parameters given z_ini and w_ini
		mOut <- mStep_e(d=d,z=z_ini,w=w_ini)
		mu <- mOut$mu
		var <- mOut$var

		# Reshuffle until every gene component has at least size.limit members.
		minimum.components <- min(colSums(z_ini))
		while(minimum.components < size.limit)
		{
			cat("# Shuffle in doInitClass2()\n")
			sh.res <- shuffle(
				d=d
				,z=z_ini
				,w=w_ini
				,size.limit=size.limit
				,mu=mu
				,var=var
			)
			# NOTE(review): on failure, `next` restarts the while loop with
			# minimum.components unchanged — if shuffle() keeps failing this
			# loops forever. A `break` (with error handling) was probably
			# intended; confirm before changing.
			if(sh.res$result == FALSE){next}
			z_ini <- sh.res$z
			w_ini <- sh.res$w
			k <- sh.res$k
			l <- sh.res$l
			mu <- sh.res$mu
			var <- sh.res$var
			minimum.components <- min(colSums(z_ini))
		}
		
		#Calculate posterior probability
		pp <- postProb_d(
			d=d
			,z=z_ini
			,w=w_ini
			,mu=mu
			,var=var)
	
		z_upd <- pp$z
		k_upd <- pp$k
	
		minimum.components2 <- min(colSums(z_upd))
		if(minimum.components2 <= size.limit)
		{
			cat("Warnings: minimum components2 = ",minimum.components2,"\n")
			cat("z_upd\n")
			print(colSums(z_upd))
#			stop()
		}
	
		cat("z_upd\n")
		print(colSums(z_upd))
	
		pp.b <- postProb_e(
			d=d
			,z=z_ini
			,w=w_ini
			,y=class
			,mu=mu
			,var=var
		)
		w_upd <- pp.b$w
		l_upd <- pp.b$k
		cat("w_upd\n")
		print(t(colSums(w_upd)))
	
		print(mOut)
	
		# Growing a NULL via [[i]]<- promotes it to a list on first use.
		initResult[[i]] <- 
			list(
			nc=nc.this
			,z=z_upd
			,w=w_upd
			,k=k_upd
			,l=l_upd
			,mu=mu
			,var=var
		)
		
	}#each number of components

#	comment(initResult) <- call
	return(initResult)

}# doInitClass

plotW <- function(w, z, ord=NULL, ...)
{
	# Visualize the outlier-sample indicator: one image row per gene
	# component, one column per sample; dark cells mark outlier samples
	# (w[, 2, k] == 1). Axis labels show "<component>(<gene count>)".
	# `ord` is accepted for interface compatibility but is not used here.
	k.total <- dim(w)[3]
	outlier.ind <- w[, 2, ]
	image(
		outlier.ind
		,col=c("white", "black", "red", "red")
		,xlab="samples"
		,ylab=""
		,axes=FALSE
		,...
	)
	box()
	row.labels <- paste0(seq_len(k.total), "(", colSums(z), ")")
	axis(side=2, at=c(0:(k.total-1))/(k.total-1), labels=row.labels, tick=FALSE, las=1)
}#plotW

makerowColors <- function(z=NULL)
{
	# Map each row of an indicator/probability matrix z to a color from a
	# fixed 15-color palette, using the column holding the row maximum.
	# With z = NULL the full palette itself is returned.
	palette15 <- c(
		"gray20"
		,"red"
		,"green"
		,"blue"
		,"cyan"
		,"yellow"
		,"orange"
		,"darkred"
		,"lightblue"
		,"pink"#10
		,"purple"
		,"magenta"
		,"darkblue"
		,"brown"
		,"lightgreen"
		)
	if(is.null(z)){
		return(palette15)
	}
	top.class <- apply(z, 1, which.max)
	palette15[top.class]
}# makerowColors

makecolColors <- function(w)
{
	# Gray-scale column colors for samples: the more non-null gene
	# components a sample is an outlier in, the darker its color.
	# The null component (slice 1 of w) is excluded from the count.
	outlier.slice <- w[, 2, -1]
	if(is.matrix(outlier.slice)){
		n.outlier <- rowSums(outlier.slice)
	}else{
		# Only one non-null component left: slice dropped to a vector.
		n.outlier <- outlier.slice
	}
	n.levels <- dim(w)[3] - 1
	gray(c(n.levels:0) / n.levels)[n.outlier + 1]
}# makecolColors


matplot2 <- function (x, y, type = "p", lty = 1:5, lwd = 1, pch = NULL, col = 1:6, 
    cex = NULL, bg = NA, xlab = NULL, ylab = NULL, xlim = NULL, 
    ylim = NULL, ..., add = FALSE, verbose = getOption("verbose")) 
{
    ## Variant of graphics::matplot() that additionally accepts a MATRIX
    ## `pch` (one plotting symbol per data point; column i is used for
    ## series i), while keeping base matplot's behavior for vector `pch`.
    ## Bug fixes:
    ##  * vector `pch` (including the default) was indexed `pch[, 1]` /
    ##    `pch[, i]`, which raises "incorrect number of dimensions";
    ##    vectors are now indexed per series, as in base matplot.
    ##  * rep() recycling is skipped for a matrix `pch`, which it would
    ##    otherwise silently flatten to a vector.
    paste.ch <- function(chv) paste("\"", chv, "\"", sep = "", 
        collapse = " ")
    str2vec <- function(string) {
        if (nchar(string, type = "c")[1] > 1) 
            strsplit(string[1], NULL)[[1]]
        else string
    }
    # Symbol(s) for series i: a matrix pch supplies one symbol per point.
    pch.for <- function(i) if (is.matrix(pch)) pch[, i] else pch[i]
    xlabel <- if (!missing(x)) 
        deparse(substitute(x))
    ylabel <- if (!missing(y)) 
        deparse(substitute(y))
    if (missing(x)) {
        if (missing(y)) 
            stop("must specify at least one of 'x' and 'y'")
        else x <- 1:NROW(y)
    }
    else if (missing(y)) {
        y <- x
        ylabel <- xlabel
        x <- 1:NROW(y)
        xlabel <- ""
    }
    kx <- ncol(x <- as.matrix(x))
    ky <- ncol(y <- as.matrix(y))
    n <- nrow(x)
    if (n != nrow(y)) 
        stop("'x' and 'y' must have same number of rows")
    if (kx > 1 && ky > 1 && kx != ky) 
        stop("'x' and 'y' must have only 1 or the same number of columns")
    if (kx == 1) 
        x <- matrix(x, nrow = n, ncol = ky)
    if (ky == 1) 
        y <- matrix(y, nrow = n, ncol = kx)
    k <- max(kx, ky)
    type <- str2vec(type)
    if (is.null(pch)) {
        pch <- c(1:9, 0, letters, LETTERS)
        if (k > length(pch)) 
            warning("default 'pch' is smaller than number of columns and hence recycled")
    }
    else if (is.character(pch)) 
        pch <- str2vec(pch)
    if (verbose) 
        message("matplot: doing ", k, " plots with ", paste(" col= (", 
            paste.ch(col), ")", sep = ""), paste(" pch= (", paste.ch(pch), 
            ")", sep = ""), " ...\n", domain = NA)
    ii <- match("log", names(xargs <- list(...)), nomatch = 0)
    log <- if (ii != 0) 
        xargs[[ii]]
    xy <- xy.coords(x, y, xlabel, ylabel, log = log)
    xlab <- if (is.null(xlab)) 
        xy$xlab
    else xlab
    ylab <- if (is.null(ylab)) 
        xy$ylab
    else ylab
    xlim <- if (is.null(xlim)) 
        range(xy$x[is.finite(xy$x)])
    else xlim
    ylim <- if (is.null(ylim)) 
        range(xy$y[is.finite(xy$y)])
    else ylim
    if (length(type) < k) 
        type <- rep(type, length.out = k)
    if (length(lty) < k) 
        lty <- rep(lty, length.out = k)
    if (length(lwd) < k) 
        lwd <- rep(lwd, length.out = k)
    # Do not recycle a matrix pch: rep() would flatten it to a vector.
    if (length(pch) < k && !is.matrix(pch)) 
        pch <- rep(pch, length.out = k)
    if (length(col) < k) 
        col <- rep(col, length.out = k)
    if (length(bg) < k) 
        bg <- rep(bg, length.out = k)
    if (length(cex) < k) 
        cex <- rep(cex, length.out = k)
    ii <- 1:k
    if (!add) {
        ii <- ii[-1]
        plot(x[, 1], y[, 1], type = type[1], xlab = xlab, ylab = ylab, 
            xlim = xlim, ylim = ylim, lty = lty[1], lwd = lwd[1], 
            pch = pch.for(1), col = col[1], cex = cex[1], bg = bg[1], 
            ...)
    }
    for (i in ii) {
        lines(x[, i], y[, i], type = type[i], lty = lty[i], lwd = lwd[i], 
            pch = pch.for(i), col = col[i], cex = cex[i], bg = bg[i])
    }
}# matplot2

byCluster <- function(
	x
	,nc=1
	,dist=NULL
	,method=c("euclidean","cor")
){
	# Hierarchically cluster the rows of x with amap::hcluster and cut the
	# tree into nc groups.
	# Returns list(class = named component labels 1..nc, dist = dist(0)).
	require(amap)
	call <- match.call()
	cat("byCluster()\n")
	print(call)
	method <- match.arg(method)
	tree <- hcluster(x, method=method)
	membership <- cutree(tree, k=nc)
	names(membership) <- rownames(x)
	return(list(
		class=membership, dist=dist(0)
	))#return class membership (1,2,...,nc)
}#byCluster()

byCluster2 <- function(
	x
	,ncs
	,method=c("euclidean","cor")
){
	# Cluster the rows of x once with amap::hcluster, then cut the SAME
	# tree at every component count in ncs (avoids re-clustering).
	# Returns a list with one element per entry of ncs, each of the form
	# list(class = component labels named by rownames(x)).
	require(amap)
	call <- match.call()
	cat("byCluster2()\n")
	print(call)
	method <- match.arg(method)
	tree <- hcluster(x, method=method)

	gene.names <- rownames(x)
	out <- vector("list", length(ncs))
	for(i in seq_along(ncs))
	{
		membership <- cutree(tree, k=ncs[i])
		names(membership) <- gene.names
		out[[i]] <- list(class=membership)
	}
	return(out)
}#byCluster2()



cll <- function(
	d
	,z=NULL
	,w
	,mu
	,var
	,VEC=FALSE
	,TOTALSUM=TRUE
){
#Log likelihood of normal distribution
	#x: matrix
	#mean,var: vector of length is 1
	# Complete-data Gaussian log-likelihood computed from sufficient
	# statistics (counts, sums, sums of squares).
	# d: data matrix (genes x samples)
	# z: optional gene-component indicator matrix; NULL means no gene
	#    aggregation (per-gene result)
	# w: sample-component indicator/weight matrix
	# mu, var: parameter matrices/vectors, recycled against the
	#    sufficient-statistic matrices
	# TOTALSUM: TRUE collapses the result (row sums when z is NULL,
	#    grand sum otherwise); FALSE returns the per-cell matrix.
	# NOTE(review): the VEC argument is effectively ignored — it is
	# overwritten by is.null(z) on the next line, so callers cannot
	# control it. Confirm before removing the parameter.
	VEC <- is.null(z)


	if(VEC && colSums(w)[1] == nrow(w))#z is null and all samples are null
	{
		# Fast path: every sample in component 1, so the statistics are
		# plain row sums.
		n <- rep(ncol(d),nrow(d))
		T1 <- rowSums(d)
		T2 <- rowSums(d*d)
		mu <- mu[,1]
		var <- var[,1]
	}else
	{
		one <- matrix(1,dim(d)[1],dim(d)[2])
	
		if(VEC){#z is null and all samples are not null
			n  <- one   %*% w
			T1 <- d     %*% w
			T2 <- (d*d) %*% w
		}else
		{#z exists
			n  <- t(z) %*% one   %*% w
			T1 <- t(z) %*% d     %*% w
			T2 <- t(z) %*% (d*d) %*% w
		}
	}

	# Gaussian log-density rewritten as T1*P1 + T2*P2 - C.
	sd2i <- 1 / var
	P1 <- mu * sd2i
	P2 <- -0.5 * sd2i
	C <- 0.5 * n * (P1 * mu + log(2*pi*var))#*

	#log likelihood of each region
	L <- T1*P1+T2*P2 - C
	if(TOTALSUM)
	{
		if(VEC){
			if(is.matrix(L)){
				return(rowSums(L))#each row, when called by postProb()
			}else{
				return(L)
			}
		}else
		{
			return(sum(L))#total sum
		}
	}else
	{
		return(L)#each region
	}
}# cll

cll_c <- function(
	d
	,w
	,mu
	,var
){
	# Per-row Gaussian log-likelihood from sufficient statistics, with
	# the columns of d pooled through the indicator/weight matrix w.
	# Used by postProb_c().
	# d: matrix (row: sample, col: gene); w: gene-class indicator matrix;
	# mu, var: per-class parameters (recycled against the statistic
	# matrices). Returns one log-likelihood value per row of d.
	ones <- matrix(1, nrow(d), ncol(d))

	# Sufficient statistics: counts, sums, sums of squares.
	counts <- ones  %*% w
	S1     <- d     %*% w
	S2     <- (d*d) %*% w

	prec <- 1 / var
	A1 <- mu * prec
	A2 <- -0.5 * prec
	const <- 0.5 * counts * (A1 * mu + log(2*pi*var))

	# Gaussian log-density rewritten as S1*A1 + S2*A2 - const.
	rowSums(S1*A1 + S2*A2 - const)
}# cll_c

cll_ce2 <- function(
	d
	,w
	,mu
	,var
){
	# Per-row Gaussian log-likelihood from sufficient statistics; the
	# columns of d are pooled via the weight matrix w. Used by
	# postProb_ce2(). Same computation as cll_c().
	# d: matrix (row: sample, col: gene)
	# mu, var: per-class parameters, recycled against the statistics.
	# Returns a vector with one value per row of d.
	n.stat <- matrix(1, nrow(d), ncol(d)) %*% w
	s1.stat <- d %*% w
	s2.stat <- (d * d) %*% w

	inv.var <- 1 / var
	coef1 <- mu * inv.var
	coef2 <- -0.5 * inv.var
	norm.const <- 0.5 * n.stat * (coef1 * mu + log(2*pi*var))

	loglik <- s1.stat * coef1 + s2.stat * coef2 - norm.const

	rowSums(loglik)#each row, when called by postProb_ce2()
}# cll_ce2

cll_d <- function(
	d
	,z=NULL
	,w
	,mu
	,var
){
	# Complete-data Gaussian log-likelihood per (gene component, sample
	# component) cell.
	# d: data matrix (genes x samples)
	# z: gene-component indicator matrix (genes x ncg)
	# w: sample-component indicator array (samples x ncs x ncg)
	# mu, var: ncg x ncs parameter matrices
	# Returns an ncg x ncs matrix of log-likelihood contributions.
	ncg <- ncol(z)
	ncs <- ncol(w)

	# Aggregate over genes within each gene component.
	sum.d  <- t(z) %*% d
	sum.d2 <- t(z) %*% (d*d)
	cnt    <- t(z) %*% matrix(1, nrow(d), ncol(d))

	S1 <- matrix(0, ncg, ncs)
	S2 <- S1
	N  <- S1
	for(k in seq(ncg)){
		# Fold in the sample-component weights of gene component k.
		S1[k,] <- sum.d[k,]  %*% w[,,k]
		S2[k,] <- sum.d2[k,] %*% w[,,k]
		N[k,]  <- cnt[k,]    %*% w[,,k]
	}

	prec <- 1 / var
	A1 <- mu * prec
	A2 <- -0.5 * prec
	const <- 0.5 * N * (A1 * mu + log(2*pi*var))

	# Gaussian log-density as S1*A1 + S2*A2 - const, per cell.
	S1*A1 + S2*A2 - const
}# cll_d

mll <- function(
	d
	,z
	,w
	,mu
	,var
){
	
	#calculate marginal log-likelihood by given data and parameters
	#d: data
	#z,w: posterior probabilities of class assignment
	#mu,var: parameters
	#
	# Returns S1 + S3: the assignment term (expected log prior of the
	# joint class assignments) plus the data term (expected complete-data
	# Gaussian log-likelihood). Cells whose class proportions are zero
	# (log -> -Inf) are excluded from the assignment term via p.finite.
	#
	#log marginal class probabilities
#	p <- matrix(log(colMeans(z)))
#	q <- matrix(log(colMeans(w)))

	ncs <- dim(w)[2]
	ncg <- dim(w)[3]


	# S2[k, l]: log mean weight of sample component l within gene component k.
	S2 <- matrix(0,nrow=ncg,ncol=ncs)
	for(l.ind in seq(ncs))
	{
		log.w.mean <- log(colMeans(w[,l.ind,]))
		S2[,l.ind] <- log.w.mean
	}
	S2.k <- matrix(rowSums(S2))

	log.z.mean <- matrix(log(colMeans(z)))
	# Keep only gene components with finite log proportions in both terms.
	p.finite <- is.finite(log.z.mean) & is.finite(S2.k)




	log.pp <- (log.z.mean + S2.k)[p.finite]




	if(length(log.pp) == 1 && sum(p.finite) == 1)
	{
		# Single remaining component: %*% would fail on a scalar, use *.
		S1 <- sum(z[,p.finite] * log.pp)
	}else{
		S1 <- sum(z[,p.finite] %*% log.pp)
	}



	# Sufficient statistics per gene component (as in cll_d()).
	d1.k <- t(z) %*% d
	d2.k <- t(z) %*% (d*d)
	n.k <- t(z) %*% matrix(1,nrow(d),ncol(d))
	T1 <- matrix(0,ncol(z),ncol(w))
	T2 <- T1
	n_upd <- T1

	for(k.ind in seq(ncg)){
		T1[k.ind,]     <- d1.k[k.ind,] %*% w[,,k.ind]
		T2[k.ind,]     <- d2.k[k.ind,] %*% w[,,k.ind]
		n_upd[k.ind,]  <- n.k[k.ind,]  %*% w[,,k.ind]
	}

	#It is possible that var, mu are NA


	sd2i <- 1 / var
	P1 <- mu * sd2i
	P2 <- -0.5 * sd2i
	C <- 0.5 * n_upd * (P1 * mu + log(2*pi*var))

	#log likelihood of each region
	# NA cells (empty components) are dropped from the data term.
	S3 <- T1*P1+T2*P2 - C
	S3.s <- sum(S3, na.rm=TRUE)

	return(S1+S3.s)
}# mll




mll4 <- function(
	d
	,z
	,w
	,mu
	,var
){
	#calculate marginal log-likelihood by given data and parameters
	#d: data
	#z,w: posterior probabilities of class assignment
	#mu,var: parameters
	#log marginal class probabilities
	#
	# Pointwise marginal log-likelihood: for each gene g and gene class k,
	# each sample's likelihood is marginalized over the sample classes
	# (log-sum-exp over l), summed over samples, then the result is
	# marginalized over gene classes (log-sum-exp over k) and summed over
	# genes. Requires rowLogsumexp() from this file.
	# NOTE(review): the triple loop is O(ng * ncg * ncs) R-level
	# iterations — slow for large data, but numerically careful.

	ncs <- dim(w)[2]
	ncg <- dim(w)[3]

	# phi[k, l]: mixing proportion of sample class l within gene class k.
	phi <- matrix(0, nrow=ncg, ncol=ncs)
	for(l.ind in seq(ncs))
	{
		phi[, l.ind] <- colMeans(w[, l.ind, ])
	}
	# PI[k]: mixing proportion of gene class k.
	PI <- matrix(colMeans(z))
	
	
	ng <- nrow(d)
	ns <- ncol(d)
	n  <- matrix(1,ncol=ns, nrow=ng)
	D2 <- d * d
	sd2i <- 1 / var
	P1 <- mu * sd2i
	P2 <- -0.5 * sd2i

	# Rgk[g, k]: log( PI[k] * prod_i sum_l phi[k,l] * N(d[g,i]; mu, var) )
	Rgk <- matrix(NA, nrow=ng, ncol=ncg)
	for(kk in c(1:ncg)){
		for(g in seq(ng)){
			Lil <- matrix(NA, nrow=ns, ncol=ncs)
			for(ll in seq(ncs))
			{
				#log likelihood of each point
				C <- 0.5 * rep(1, ns) * (P1[kk,ll] * mu[kk,ll] + log(2 * pi * var[kk,ll]))

				LFgl <- d[g,] * P1[kk,ll] + D2[g,] * P2[kk,ll] - C
				#make Rgki
				Lil[,ll] <- log(phi[kk, ll]) + LFgl

			}#ll
			Rgk[g, kk] <- log(PI[kk]) + sum(rowLogsumexp(Lil), na.rm=TRUE)# (m,L)
		}# g
	}#kk
	#Rgk is made

	# Marginalize over gene classes and sum over genes.
	L2 <- sum(rowLogsumexp(Rgk), na.rm=TRUE)
	return(L2)
}# mll4




rowLogsumexp <- function(
	x
){
	# Numerically stable log(sum(exp(.))): row-wise for a matrix, over
	# all elements for a vector. NA terms are dropped from the sum; the
	# row (or overall) maximum anchors the exponentials. Used in the
	# posterior-probability routines.
	if(is.matrix(x)){
		# Mask NAs so max.col() never picks them as the anchor.
		guarded <- x
		guarded[is.na(guarded)] <- -Inf
		peak.col <- max.col(guarded)
		peak.pos <- cbind(seq(nrow(x)), peak.col)
		peak <- x[peak.pos]
		peak + log(rowSums(exp(x - peak), na.rm=TRUE))
	}else if(is.vector(x)){
		peak <- max(x)
		peak + log(sum(exp(x - peak), na.rm=TRUE))
	}else{
		stop("")
	}
}#rowLogsumexp


mStep_e <- function(
	d
	,z
	,w
	,THEORET=FALSE
){
	# M-step: weighted counts, means and variances for every
	# (gene component, sample component) cell.
	# d: data (genes x samples)
	# z: gene-component indicators (genes x ncg)
	# w: sample-component indicator array (samples x 2 x ncg)
	# THEORET=TRUE validates w's sample dimension and pins the null
	# sample component (column 1) to the theoretical N(0, 1).
	# Returns list(n, mu, var), each ncg x 2; empty cells yield NA.
	if(THEORET)
	{
		n.samples <- ncol(d)
		if(nrow(w) != n.samples){
			print(dim(w))
			stop("mStep_e:bad length\n")}
	}
	ncg <- ncol(z)
	ncs <- 2
	var_upd <- matrix(0, ncg, ncs)
	n_upd   <- matrix(0, ncg, ncs)
	sums    <- matrix(0, ncg, ncs)

	# Gene-component aggregated sums and counts.
	d.agg <- t(z) %*% d
	n.agg <- t(z) %*% matrix(1, nrow(d), ncol(d))

	for(k.ind in seq(ncg)){
		sums[k.ind,]  <- d.agg[k.ind,] %*% w[,,k.ind]
		n_upd[k.ind,] <- n.agg[k.ind,] %*% w[,,k.ind]
	}
	mu_upd <- sums / n_upd
	# Weighted squared deviations around each cell mean.
	for(k.ind in seq(ncg)){
	for(l.ind in seq(ncs)){
		dev <- d - mu_upd[k.ind, l.ind]
		sq <- dev * dev
		sq.agg <- t(z[, k.ind]) %*% sq
		ss <- sq.agg %*% w[, l.ind, k.ind]
		var_upd[k.ind, l.ind] <- ss / n_upd[k.ind, l.ind]
	}}
	# Empty cells (0/0) become NA rather than NaN.
	mu_upd[is.nan(mu_upd)]   <- NA
	var_upd[is.nan(var_upd)] <- NA
	if(THEORET)
	{
		# Theoretical null: standard normal for sample component 1.
		mu_upd[ ,1] <- 0
		var_upd[ ,1] <- 1
	}

	return(list(
		n=n_upd
		,mu=mu_upd
		,var=var_upd
	))
}# mStep_e


postProb_d <- function(
	d#data
	,z#class indicator (to update)
	,w#class indicator, given
	,mu
	,var
){
	#calculate posterior probabilities of class assignment of genes	
	# E-step for genes: given the sample-component array w, compute the
	# log-likelihood of each gene under every gene component (via cll()),
	# combine with the current class proportions of z, and return the
	# hard (max a posteriori) assignment plus the normalized posterior.
	# Returns list(z=0/1 matrix, k=posterior, logk=log posterior).
	ncg <- ncol(z)
	ng <- nrow(z)
	ncs <- ncol(w)

	CLK <- matrix(0,ncg,ng)

	for(kk in seq_len(ncg))#gene class kk
	{
#		cat("k=",kk,"\n")
		# Replicate component kk's parameter rows so cll() evaluates
		# every gene under this component's parameters.
		s <- rep(kk,ng)
		#complete log likelihood
		CLK[kk,] <- cll(d=d,w=w[,,kk],mu=mu[s,],var=var[s,])
	}
	
	#prior probabilities of class assignments
	prior.z <- colMeans(z)
#	prior.w <- colMeans(w)
	if(any(prior.z < 0) || sum(prior.z) > 1)
	{
		print(prior.z)
		stop("postProb_d(). bad prior.z\n")
	}
	# posterior probability for class assignment
	LpCLK <- t(log(prior.z)+CLK)
	#update class assignments based on posterior
	# Hard assignment: component with largest log posterior per gene.
	z_upd <- max.col(LpCLK)
	max.ind <- cbind(seq(ng),z_upd)
	#Transform to matrix
	z.m <- matrix(0,nrow=nrow(z),ncol=ncol(z))
	z.m[max.ind] <- 1

	#normalize posterior (logsumexp, Minka's function)
	LpCLKMax <- LpCLK[max.ind]
	D <- LpCLK - LpCLKMax
	MLL2 <- log(rowSums(exp(D)))
	logk <- D - MLL2
	k <- exp(logk)


	return(list(
		z=z.m			#updated class assignments
		,k=k			#posterior probability
		,logk=logk#log posterior probability
		))
}# postProb_d


postProb_e <- function(
	d#data
	,z#class indicator 
	,w#class indicator (to update)
	,y=NULL#clinical class indicator (currently unused; kept for interface compatibility)
	,mu
	,var
	,marginal=FALSE
){
	#calculate posterior probabilities for sample class for each gene class
	# For each gene class k, delegates to postProb_c() on the samples-by-
	# genes slice of d for that class.
	# Returns list(w=hard assignments, k=posterior, logk=log posterior),
	# each shaped like w.
	# Bug fixes: the marginal=TRUE branch referenced z_mat before it was
	# defined and forwarded a `y` argument that postProb_c() does not
	# accept (both raised errors when marginal=TRUE was used); z_mat is
	# now built per gene class and `y` is no longer forwarded.
	w_upd <- array(0,dim(w))
	k_upd <- array(0,dim(w))
	logk_upd <- array(0,dim(w))

	if(marginal)
	{
		for(k in 1:ncol(z))#For each gene class
		{
			# Indicator placing every selected gene in class 1 of two.
			zlen <- sum(z[,k]==1)
			z_mat <- outer(rep(1,zlen),c(1,0))
			pp <- postProb_c(
				d=t(d[z[,k] == 1,])#row:sample, col:gene
				,z=w[,,k]#sample
				,w=z_mat#gene
				,mu=mu[k,]
				,var=var[k,]
			)
			w_upd[,,k] <- pp$z
			k_upd[,,k] <- pp$k
			logk_upd[,,k] <- pp$logk
		}
	}else
	{
		for(k in 1:ncol(z))#For each gene class
		{
			zlen <- sum(z[,k]==1)
			z_mat <- outer(rep(1,zlen),c(1,0))
			if(sum(w[,2,k]) == 0)#Not k=1 but consider all samples are non-outlier
			{
				# No outlier samples for this class: posterior is the
				# degenerate "all non-outlier" assignment.
				w_upd_k1 <- matrix(0,nrow(w),2)
				w_upd_k1[,1] <- 1
	
				w_upd[,,k] <- w_upd_k1
				k_upd[,,k] <- w_upd_k1
	
				w_upd_logk1 <- matrix(-Inf,nrow(w),2)
				w_upd_logk1[,1] <- 0
	
				logk_upd[,,k] <- w_upd_logk1
			}else
			{
				# Sanity check: gene counts must agree between d slice and z_mat.
				if(ncol(t(d[z[,k] == 1,])) != nrow(z_mat))
				{
					print(k)
					print(dim(t(d[z[,k] == 1,])))
					print(dim(z_mat))
					stop("postProb_e() bad z.\n")
				}
				pp <- postProb_c(
					d=t(d[z[,k] == 1,])#row:sample, col:gene
					,z=w[,,k]#sample
					,w=z_mat#gene
					,mu=mu[k,]
					,var=var[k,]
				)
				w_upd[,,k] <- pp$z
				k_upd[,,k] <- pp$k
				logk_upd[,,k] <- pp$logk
			}
		}# for each k
	}
	
	return(list(
		w=w_upd
		,k=k_upd
		,logk=logk_upd
	))
}# postProb_e


postProb_e2 <- function(
	d#data
	,z#class indicator for genes
	,w#class indicator (to update), for samples
	,mu# parameters
	,var# parameters
){
	# Update the sample-component assignments for every gene component by
	# delegating to postProb_ce2(), weighting genes by z[, k].
	# Returns list(w=hard assignments, l=posterior, logl=log posterior),
	# each shaped like w.
	w_new    <- array(0, dim(w))
	l_new    <- array(0, dim(w))
	logl_new <- array(0, dim(w))

	for(k in seq_len(ncol(z)))#for each gene component
	{
		res <- postProb_ce2(
			d=t(d)		#row:sample, col:gene
			,z=w[,,k]	#sample
			,w=matrix(z[,k])
			,mu=mu[k,]
			,var=var[k,]
		)
		# postProb_ce2() returns FALSE when its inputs are inconsistent.
		if(!is.list(res))
		{
			stop(paste("postProb_e2(): k=",k,"\n",sep=""))
		}
		w_new[,,k]    <- res$z
		l_new[,,k]    <- res$k
		logl_new[,,k] <- res$logk
	}# for each k

	return(list(
		w=w_new
		,l=l_new
		,logl=logl_new
	))
}# postProb_e2


postProb_c <- function(
#Called by postProb_e and postProb_e2 only.
	d#data (row:sample,col:gene)
	,z#class indicator (to update), sample
	,w#class indicator gene
	,mu# parameters
	,var# parameters
){
	#calculate posterior probabilities of class assignment
	# E-step for samples within one gene class: the log-likelihood of
	# each sample under every sample class (via cll_c()) is combined with
	# the current class proportions of z, then hard assignments and the
	# normalized posterior are derived with a log-sum-exp normalization.
	# Returns list(z=0/1 matrix, k=posterior, logk=log posterior, LpCLK).
	ncs <- ncol(z)#sample
	ns  <- nrow(z)#sample

	#prior probabilities of class assignments of samples for gene class (k) (q_k)
	p <- colMeans(z)#sample
	if(any(p < 0) || sum(p) > 1)
	{
		print(p)
		stop("postProb_c(). bad z\n")
	}

	#calculate log-likelihood
	CLK <- matrix(0,ncs,ns)
	for(ll in c(1:ncs))#each sample class ll=1,2
	{
		# Evaluate every sample under class ll's parameters.
		s <- rep(ll,ns)#sample class
		#complete log likelihood
		if(ncol(d) != nrow(w)){stop("postProb_c()\n")}
		CLK[ll,] <- cll_c(
			d=d
			,w=w
			,mu=mu[s]
			,var=var[s])


	}#ll
	
	# NA likelihoods are impossible assignments.
	CLK[is.na(CLK)] <- -Inf

	# posterior probability for class assignment
	LpCLK <- t(log(p) + CLK)

	#update class assignments
	#z_upd <- apply(LpCLK,1,which.max)
	z_upd <- max.col(LpCLK)
	max.ind <- cbind(seq(ns),z_upd)
	#Transform to matrix
	z.m <- matrix(0,nrow=nrow(z),ncol=ncol(z))
	z.m[max.ind] <- 1

	#normalize posterior (logsumexp, Minka's function)
	LpCLKMax <- LpCLK[max.ind]
	D <- LpCLK - LpCLKMax
	logk <- D - log(rowSums(exp(D)))
	k <- exp(logk)

	return(list(
		z=z.m			#updated class assignments for sample
		,k=k			#posterior probability
		,logk=logk#log posterior probability
		,LpCLK=LpCLK
		))
}# postProb_c



postProb_ce2 <- function(
	d#data (row:sample,col:gene)
	,z#class indicator (to update) for samples
	,w#class indicator for genes
	,mu# parameters
	,var# parameters
){
	# Called by postProb_e2 only.
	# Calculate posterior probabilities of class assignment
	# Same computation as postProb_c(), except that an inconsistent z
	# returns FALSE instead of stopping (the caller checks is.list()).
	# Returns list(z=0/1 matrix, k=posterior, logk=log posterior, LpCLK).
	ncs <- ncol(z)#sample
	ns  <- nrow(z)#sample

	#prior probabilities of class assignments of samples for gene class (k) (q_k)
	p <- colMeans(z)#sample
	if(any(p < 0) || sum(p) > 1)
	{
		cat("postProb_ce2(). bad z\ncolMeans(z):",p,"\n")
		return(FALSE)
	}
	if(ncol(d) != nrow(w)){stop("postProb_ce2()\n")}
	#calculate log-likelihood
	CLK <- matrix(0,ncs,ns)
	for(l.ind in c(1:ncs))#each sample class l.ind=1,2
	{
		# Evaluate every sample under class l.ind's parameters.
		s <- rep(l.ind, ns)#sample class
		#complete log likelihood
		CLK[l.ind, ] <- cll_ce2(#returns vector of length is ns
			d=d
			,w=w
			,mu=mu[s]
			,var=var[s])
	}#
	
	# NA likelihoods are impossible assignments.
	CLK[is.na(CLK)] <- -Inf

	# posterior probability for class assignment of 
	LpCLK <- t(log(p) + CLK)



	#update class assignments
	#z_upd <- apply(LpCLK, 1, which.max)
	z_upd <- max.col(LpCLK)
	
	#normalize posterior (logsumexp, Minka's function)
	max.ind <- cbind(seq(ns),z_upd)
	#Transform to matrix
	z.m <- matrix(0,nrow=nrow(z),ncol=ncol(z))
	z.m[max.ind] <- 1


	LpCLKMax <- LpCLK[max.ind]
	D <- LpCLK - LpCLKMax
	logk <- D - log(rowSums(exp(D)))
	k <- exp(logk)


	return(list(
		z=z.m			#updated class assignments for sample
		,k=k			#posterior probability
		,logk=logk#log posterior probability
		,LpCLK=LpCLK
		))
}# postProb_ce2


reassign2 <- function(
	z
	,p=0.1
	,from=NULL
	,to=NULL
){
	# Randomly move a fraction p of the genes currently assigned (hard,
	# via max.col) to the components in `from` into the components in
	# `to`, choosing the destination uniformly at random among them.
	# Only used by shuffle(). Returns the updated indicator matrix.
	z_new <- z
	target.comps <- to
	n.targets <- length(target.comps)
	unif.prob <- rep(1, n.targets) / n.targets
	# Genes whose current component is one of `from`.
	current <- max.col(z)
	movable <- which(rowSums(outer(current, from, "==")) == 1)
	n.move <- floor(length(movable) * p)
	cat("ng:\n")
	print(str(movable))
	chosen <- sample(x=movable, size=n.move, replace=FALSE)

	# One multinomial draw per chosen gene over the target components.
	z_new[chosen, target.comps] <- t(
		rmultinom(
			n=n.move
			,size=1
			,prob=unif.prob
		))
	z_new[chosen, -target.comps] <- 0
	z_new
}# reassign2


reassign3 <- function(
	d
	,z
	,from=NULL	#component from which genes come
	,to=NULL		#component to which genes go
){
	# Reassign genes by re-clustering: the genes of the `from` components
	# are hierarchically re-clustered (byCluster) into length(union(from,
	# to)) groups, 10% of the new labels are randomly permuted among the
	# selected genes, and the result replaces columns union(from, to) of z.
	target.comps <- union(from, to)
	if(!all(is.element(target.comps, seq(ncol(z))))){stop("reassign3 error\n")}
	n.groups <- length(target.comps)
	if(length(from) > 1)
	{
		selected <- rowSums(z[,from]) != 0#selection of genes
	}else{
		selected <- z[,from] != 0#selected index of genes
	}
	if(length(selected) != nrow(d)){stop("reassign3(): length check, z_s\n")}


	clust <- byCluster(
		x=d[selected,]
		,method="cor"
		,nc=n.groups)

	# Perturb 10% of the labels by permuting them among themselves.
	new.labels <- clust$class
	n.perturb <- floor(length(new.labels) * .1)
	pick <- sample(seq(new.labels), n.perturb)
	shuffled <- sample(pick)
	new.labels[pick] <- new.labels[shuffled]

	z_new <- z#return index matrix
	z_new[selected,  target.comps] <- outer(new.labels, seq(n.groups), "==")
	z_new[selected, -target.comps] <- 0

	return(z_new)
}# reassign3



shuffle <- function(
	d
	,z
	,w
	,size.limit=1
	,mu
	,var
	,THEORET=FALSE
){
	# Break up under-populated gene components by randomly reassigning
	# genes (reassign2), rebuilding the sample-component array from the
	# new gene components (kmeans, as in calc_w but without the clinical
	# class constraint), and re-estimating parameters. Repeats until
	# every gene component has more than size.limit members.
	# Returns list(z, w, mu, var, result=TRUE), or list(result=FALSE)
	# when 10 consecutive reassignments fail to produce components of
	# size >= 2.
	# NOTE(review): kmeans() uses random starts, so results are
	# non-deterministic unless the caller seeds the RNG.

	nc <- ncol(z)
	rep_cnt <- 0

	repeat
	{
		rep_cnt <- rep_cnt + 1
		cll_temp <- cll_d(
			d=d
			,z=z
			,w=w
			,mu=mu
			,var=var
		)
		cSumz <- colSums(z)
		# Per-gene-component mean log-likelihood (computed but unused below).
		cll_dev_n <- rowSums(cll_temp,na.rm=TRUE)/cSumz
		cll_dev_n.rank <- rank(cll_dev_n)

		# Components at or below the size limit receive new members; if
		# none qualify, top up the smallest one.
		minimum.components.ind <- which(cSumz <= size.limit)
		if(length(minimum.components.ind) == 0)
		{
			minimum.components.ind <- which.min(cSumz)
		}


		from.ind <- which(!is.element(seq(nc), minimum.components.ind))

		rep_cnt2 <- 0
		###############################################################
		repeat{
			rep_cnt2 <- rep_cnt2 + 1
			z_upd <- reassign2(
				z=z
				,p=runif(n=1,min=0.5,max=0.8)
				,from=from.ind
				,to=minimum.components.ind#component with minimum number of members
			)

			print(colSums(z_upd))
			# kmeans below needs at least 2 samples per component.
			if(min(colSums(z_upd)) >= 2) break
			if(rep_cnt2 >= 10)
			{
				return(list(result=FALSE))
			}
			#After 10 repeats of assignment, if min(colSums(z_upd)) == 1 then error will occur with kmeans
		}# repeat class assignment until ...
		cat("updated z\n")
		z <- z_upd
		gene.components <- max.col(z)

		#Making w based on given z and d
		# Same construction as calc_w(), but every sample in the outlier
		# kmeans cluster is marked (no clinical-class constraint here).
		w <- array(0,dim=c(ncol(d),2,nc))
		w[,1,1] <- 1
		for(c.ind in c(2:nc))
		{
			c.ind.genes <- gene.components == c.ind
			sample.exp <- d[c.ind.genes,]
			cl.out <- kmeans(t(sample.exp),2)
			mean1 <- mean(sample.exp[,cl.out$cl==1])
			mean2 <- mean(sample.exp[,cl.out$cl==2])
			outlier.class.ind <- ifelse(abs(mean1) > abs(mean2), 1, 2)
			high.ind <- cl.out$cl == outlier.class.ind
			oe.ind <- high.ind
			w[!oe.ind, 1, c.ind] <- 1
			w[ oe.ind, 2, c.ind] <- 1
		}
	
		mOut <- mStep_e(d=d, z=z, w=w, THEORET=TRUE)
		mu <- mOut$mu
		var <- mOut$var
	
		
		cat("repeat in shuffle()\n")
		if(min(colSums(z_upd)) > size.limit) break
	}# repeat

	cat("shuffle() repeat:",rep_cnt,"\n",sep="")

	return(list(
		z=z
		,w=w
		,mu=mu
		,var=var
		,result=TRUE
	))
}# shuffle


entropy <- function(
	z
	,log=FALSE
){
	# Negative Shannon entropy, -sum(p * log(p)), accumulated with a
	# log-sum-exp trick for numerical stability. With log=FALSE, z holds
	# probabilities; with log=TRUE, z holds log-probabilities. Terms with
	# p == 0 or p == 1 contribute nothing and are dropped; an empty
	# contribution set returns 0. Out-of-range values trigger warnings.
	if(log){
		if(any(!is.finite(z))){warning("Infinite z!\n")   }
		if(any(z > 0))        {warning("Positive in z!\n")}
		lp <- z[is.finite(z) & z < 0]
	}else{
		if(any(z <= 0)){warning("Non-positive z!\n")    }
		if(any(z >  1)){warning("More than one in z!\n")}
		lp <- log(z[z > 0 & z < 1])
	}
	if(length(lp) == 0) return(0)
	# log(-p * log(p)) per term, then log-sum-exp over terms.
	terms <- lp + log(-lp)
	terms <- terms[!is.nan(terms)]
	peak <- max(terms)
	total <- log(sum(exp(terms - peak))) + peak
	-exp(total)
}#entropy

EN.doEM3 <- function(
	k
	,l
){
	# Total entropy penalty for the ICL criterion: combines the gene
	# posterior (k) and sample posterior (l), both on the log scale.
	# The gene term is scaled by the number of sample-posterior cells per
	# gene class and vice versa; entropy() returns negative entropy, so
	# the leading minus makes the result a positive penalty.
	gene.term   <- entropy(z=k, log=TRUE)
	sample.term <- entropy(z=l, log=TRUE)
	-(gene.term * dim(l)[1] * dim(l)[3] + sample.term * dim(k)[1])
}#EN.doEM3



doEM <- function(
	d
	,z
	,w
	,k
	,l
	,mu
	,var
	,C_limit=50
	,V=FALSE
	,label=NULL
	,THRESH=1e-10#For the judgement of convergence
	,THEORET=FALSE
	,FV=FALSE#TRUE if using fixed variance
	,shu_cnt_thresh=10#shuffle count threshold
	,V2=FALSE# plotV
	,size.limit=2#Minimum size of component (Number of members)
)
{
	# EM algorithm for the nested mixture model.
	# d: data (genes x samples); z/w: hard gene/sample assignments;
	# k/l: gene/sample posteriors; mu/var: parameter matrices — all as
	# produced by doInitClass().
	# Iterates (shuffle if a component is too small) -> E1 (genes,
	# postProb_d) -> E2 (samples, postProb_e2) -> M (mStep_e) until the
	# marginal log-likelihood (mll4) changes by <= THRESH or C_limit
	# iterations are reached. Returns the fitted assignments, posteriors,
	# parameters and AIC/BIC/ICL, or list(result=FALSE, ...) when
	# shuffling fails or exceeds shu_cnt_thresh.
	# NOTE(review): windows() (used when V/V2 are TRUE) is Windows-only;
	# these verbose paths fail on other platforms.
	cl <- match.call()
	cat("doEM()\n")
	n.class <- ncol(z)
	if(THEORET)#theoretical null
	{
		mu[1,1] <- 0
		mu[1, ] <- 0
		var[1,1] <- 1
		var[1, ] <- 1
	}
	if(FV)#fixed variance
	{
		var[1:n.class,  ] <- 1
	}

	# Per-iteration traces of the log-likelihoods and shuffle flags.
	mll_it   <- rep(0,C_limit)
	mll_it_2 <- rep(0,C_limit*2)
	cll_it <- rep(0,C_limit)
	shu_it <- rep(FALSE,C_limit)
	c_it <- 0#Iteration counter of EM algorithm
	shu_cnt_total <- 0#total shuffle count through EM iterations
	MLL <- 0

	if(V2)
	{
		windows(title=paste("w", n.class, sep=""))
		par(mfrow=c(4,4))
		plotW(w,z,main="0")
	}
	################################################################################################
	# EM Algorithm
	################################################################################################
	repeat{
		c_it <- c_it + 1

		################################################################################################
		# shuffling gene class assignment (:z)
		################################################################################################
		shuffle_cnt <- 0#shuffle count in this iteration
		# NOTE(review): shuffle_cnt was just reset to 0, so the
		# "&& shuffle_cnt <= 3" clause is always TRUE here.
		if(min(colSums(z)) <= size.limit && shuffle_cnt <= 3)
		{
			if(shu_cnt_total >= shu_cnt_thresh){#shuffle count threshold
				return(
					list(
						result=FALSE
						,c_it=c_it
						,shu_cnt_total=shu_cnt_total
						,comment="shuffle count over"
						,n.class=n.class
					)
				)
			}
			shu_cnt_total <- shu_cnt_total + 1
			shuffle_cnt <- shuffle_cnt  + 1
			if(shuffle_cnt >= 10){
				cat("### exit by shuffle_cnt >= 10\n")
				# NOTE(review): `last` is not an R keyword (loop exit is
				# `break`); evaluating it would raise "object 'last' not
				# found". The branch is currently unreachable because
				# shuffle_cnt is at most 1 here — confirm intent before fixing.
				last
			}
			z.b <- z
			sh.res <- shuffle(
				d=d
				,z=z
				,w=w
				,size.limit=size.limit
				,mu=mu
				,var=var
				,THEORET=THEORET
				)
			if(sh.res$result==FALSE)
			{
				cat("### exit by shuffle failed.\n")
				return(
					list(
						result=FALSE
						,c_it=c_it
						,shu_cnt_total=shu_cnt_total
						,comment="shuffling failed"
						,n.class=n.class
					))
			}
			# Shuffled assignments replace the posteriors directly.
			k <- sh.res$z
			l <- sh.res$w
			mu <- sh.res$mu
			if(FV){
			}else{
				var <- sh.res$var
			}
			shu_it[c_it] <- TRUE
			cat("Before shuffling (colSums(z)\n")
			print(colSums(z.b))
			cat("After shuffling (colSums(k)\n")
			print(colSums(k))
		}#Shuffling
		################################################################################################
		# (E1)Gene component assignments is calculated given sample components
		################################################################################################
		pp1 <- postProb_d(
			d=d
			,z=k
			,w=l
			,mu=mu
			,var=var)

		z_upd <- pp1$z
		k_upd <- pp1$k

		if(V)
		{
			cat("z_upd\n")
			print(colSums(z_upd))#Error
		}

		################################################################################################
		# E step 2 (E2)sample components are calculated for each gene class
		################################################################################################

		pp2 <- postProb_e2(
			d=d
			,z=k_upd
			,w=l
			,mu=mu
			,var=var
		)
		l_upd <- pp2$l
		w_upd <- pp2$w

		if(V)
		{
			cat("l_upd\n")
			print(t(colSums(l_upd)))
			cat("w_upd\n")
			print(t(colSums(w_upd)))
		}
		################################################################################################
		# (M step 2)
		# Calculating parameters under calculated gene class and sample class
		################################################################################################
		mOut <- mStep_e(d=d, z=k_upd, w=l_upd, THEORET=THEORET)
		if(V)
		{
			cat("mOut:\n")
			print(mOut)
		}
		################################################################################################
		# Updated parameters
		################################################################################################
		z <- z_upd
		w <- w_upd
		k <- k_upd
		l <- l_upd
		mu <- mOut$mu
		if(FV){
		}else{
			var <- mOut$var
		}
		################################################################################################
		# Calculate marginal log-likelihood
		################################################################################################
		MLL2 <- mll4(
			d=d
			,z=k
			,w=l
			,mu=mu
			,var=var)
		
		cll_temp <- cll_d(
			d=d
			,z=z
			,w=w
			,mu=mu
			,var=var
		)

		CLL <- sum(cll_temp,na.rm=TRUE)
		
		MLL <- MLL2
		if(V)
		{
			cat("marginal log-likelihood\n")
			print(MLL)
		}
		mll_it[c_it] <- MLL
		cll_it[c_it] <- CLL
	
		# Residual change in MLL; convergence is judged on this below.
		if(c_it >= 2)
		{
			res <- abs(MLL - mll_it[c_it - 1])
		}else
		{
			res <- NA
		}
		
		
		if(V)
		{
			cat("number of genes for each component.\n")
			print(colSums(z_upd))
			#for samples for each gene component
			cat("number of non-outlier/outlier samples for each gene component.\n")
			print(t(colSums(w_upd)))
		}

		if(V2) {
			shu.mark <- ifelse(shu_it[c_it],"s","")
			plotW(w,z,main=paste(c_it,shu.mark,"(",round(log10(res),3),")",sep=""))}


		################################################################################################
		# Convergence judgement
		################################################################################################
		if((c_it >= 2 && res <= THRESH) || c_it == C_limit) break

		# Progress marker: "s" when this iteration shuffled, "*" otherwise.
		mark <- ifelse(shu_it[c_it],"s" ,"*")

		if(c_it %% 10 == 1){
			cat("\n", c_it, ":", mark, sep="")
		}else{
			cat(mark)
		}
		if(V)
		{
			cat("Residual:\n")
			print(res)
		}
	}#iteration
	cat("\n")
	
	################################################################################################
	# Show results
	################################################################################################
	if(V)
	{
		#Posterior probabilities
		windows(title="04: posteriro for gene classes")
		par(mfrow=c(2,1))
		matplot(pp1$k,type="l",main="posterior (gene)")
		matplot(pp1$logk,type="l",main="log posterior (gene)")
		
		windows(title="05: posterior for sample")
		par(mfrow=c(2,1))
		matplot(pp2$l[,2,],type="l", main="posterior for sample, for gene class 2 and 3")
		matplot(pp2$logl[,2,],type="l", main="log posterior for sample, for gene class 2 and 3")
	}

	if(V)
	{
		if(!is.null(label))
		{
			pngfile4 <- paste(label,"_posterior.png",sep="")
			png(filename=pngfile4)
			par(mfrow=c(2,1))
			matplot(pp1$k,type="l",main="posterior (gene)")
			matplot(pp2$l[,2,],type="l", main="posterior for sample")
			dev.off()
		}
	}

	cat("# of iterations.\n")
	print(c_it)
	
	LL_it <- cbind(mll_it[1:c_it],cll_it[1:c_it])
	colnames(LL_it) <- c("MLL","CLL")
	if(FALSE)
	{
		#log-likelihood (disabled plotting block)
		plot.symbol <- ifelse(shu_it[1:c_it],2,1)
		windows(title="06:log-likelihood")
		par(mfrow=c(2,1))
		plot(
			LL_it[,1]
			,type="b"
			,pch=plot.symbol
			,xlab="iteration"
			,ylab="marginal log likelihood"
			,main=paste("Marginal Log-Likelihood, c= ",c_it,sep="")
			,col="black"
			)
		plot(
			LL_it[,2]
			,type="b"
			,pch=plot.symbol
			,xlab="iteration"
			,ylab="complete log likelihood"
			,main=paste("Complete Log-Likelihood, c= ",c_it,sep="")
			,col="black"
			)
	}
	
	#Entropy of fuzzy classification matrix for ICL
	EN <- EN.doEM3(k=pp1$logk, pp2$logl)
	MCL <- cll_it[c_it]#maximum complete likelihood

	if (THEORET)
	{#Number of free parameters
		if(FV)
		{
			DF <- (n.class - 1) * 3
		}else{
			DF <- (n.class - 1) * 4
		}
	}else
	{
		DF <- length(mOut$mu[!is.na(mOut$mu)]) * 2
	}
	N <- nrow(d) * ncol(d)
	#information criteria
	AIC <- (-2 * MLL) + (2      * DF)
	BIC <- (-2 * MLL) + (log(N) * DF)
	ICL <- BIC + (2 * EN)#ICL: McLachlan and Peel, 2000, p.215
	
	cat("Entropy:\n")
	print(EN)
	cat("AIC:\n")
	print(AIC)
	cat("BIC:\n")
	print(BIC)
	cat("ICL:\n")
	print(ICL)

	if(V)
	{
		rowColors <- makerowColors(z)
		colColors <- makecolColors(w)

		windows(title="07: Heatmap (result)")
		heatmap(
			d
			,xlab="samples",ylab="genes"
			,col=colorRampPalette(c("green","black","red"))(64)
			,Rowv=NA,Colv=NA
			,RowSideColors=rowColors
			,ColSideColors=colColors
			,labRow=FALSE
			,labCol=FALSE
			,main="result class"
		)
	}

	# Carry gene/sample names onto the result matrices.
	rownames(k) <- rownames(d)
	rownames(z) <- rownames(d)
	rownames(l) <- colnames(d)
	rownames(w) <- colnames(d)

	return(list(
		z=z
		,w=w
		,k=k
		,l=l
		,mu=mu
		,var=var
		,AIC=AIC
		,BIC=BIC
		,ICL=ICL
		,EN=EN
		,c_it=c_it
		,shu_it=shu_it[1:c_it]
		,LL=LL_it[1:c_it,]
		,res=res
		,MLL_IT2=mll_it_2
		,result=TRUE
		,THEORET=THEORET
		,n.class=n.class
	))

}# doEM

