## sparse mixed membership stochastic block model
## (c) Yongjin Park, 2013
require(Matrix)
source("elastic.r")

## Fit a sparse mixed-membership stochastic block model.
##
## Alternates (a) variational inference on latent memberships and block
## parameters (Sparse.MMSB) with (b) an elastic-net / lasso prior on the
## block parameters (Elastic.Net, from elastic.r), and tunes the lasso
## penalty by empirical Bayes in an outer loop.
##
## A       : (n x n) adjacency matrix, coercible to dgCMatrix
## K       : number of blocks
## n.iter  : max iterations for both the inner and outer loops
## tol     : relative convergence tolerance
## verbose : print progress / diagnostic output
## t.off   : offset of the stochastic learning-rate schedule
## t.exp   : exponent of the schedule; rate = (t.off + t)^(-t.exp)
##
## Returns list(model = fitted Sparse.MMSB, prior = fitted Elastic.Net).
smmsb <- function( A, K, n.iter=100, tol=1e-2, verbose=TRUE, t.off=0, t.exp=.55 )
  {
    model <- Sparse.MMSB$new( A, K, tol=tol )
    model$block.inference(1, verbose)

    ## lasso prior with data-driven initial penalty scales
    lasso <- Elastic.Net$new(lmd.1=1/mean( sqrt(model$rho.sq) ),
                             lmd.2=1/mean( model$rho.sq ),
                             p = K+1, type="lasso" )

    ## initialize the model's prior precision from the lasso
    lasso$variational.update( model$rho.sq )
    model$gam.0 <- lasso$v.inv

    for( h.iter in 1:n.iter )
      {
        ## inner loop: alternate latent / block / prior updates until the
        ## prior precision (v.inv) stabilizes
        for( t in 1:n.iter )
          {
            rate <- (t.off+t)^(-t.exp)   # decaying stochastic learning rate
            model$latent.inference( n.iter=n.iter, verbose=verbose )
            model$block.inference( learn.rate=rate, verbose=verbose )

            v.old <- lasso$v.inv
            lasso$variational.update( model$rho.sq )

            if( t > 1+t.off )
              {
                ## relative change of the prior precision
                diff <- mean( abs(v.old - lasso$v.inv) ) / max( tol, mean(abs(v.old)) )
                if( diff < tol ){ break }
              }

            model$gam.0 <- lasso$v.inv
          }

        ## outer loop: empirical-Bayes update of the lasso penalty;
        ## stop once the penalty itself has converged
        lmd.1.old <- lasso$lmd.1
        lasso$empirical.bayes()

        ## diagnostics respect the verbose flag
        if( verbose )
          {
            cat("\n\n lambda = ", lmd.1.old, "\n\n", sep="")
          }
        if( h.iter > 1 )
          {
            diff <- abs( lmd.1.old - lasso$lmd.1 )
            if( diff < tol ){ break }
          }

        if( verbose )
          {
            print( model$rho )
            print( round(model$Y, 1) )
          }
      }

    list( model=model, prior=lasso )
  }

## Reference class implementing the sparse MMSB: latent (soft) block
## memberships Y, per-block log-odds parameters rho with a Gaussian
## variational posterior (natural parameters eta, precisions gam), and
## stochastic natural-gradient style updates blended by a learning rate.
Sparse.MMSB <-
  setRefClass("Sparse.MMSB",
              fields=list(
                A = "dgCMatrix",   # data matrix (n x n)
                deg = "vector",    # degree vector (1 x n)
                n = "numeric",     # dimension of A
                Y = "dgCMatrix",   # membership matrix (n x K)
                K = "numeric",     # num of blocks
                rho = "vector",    # block parameters for edges (1 x (K+1))
                rho.sq = "vector", # second moment E[rho^2] (1 x (K+1))
                eta = "vector",    # natural parameters for edges (1 x (K+1))
                gam = "vector",    # posterior precision (diagonal; 1 x (K+1))
                gam.0 = "vector",  # prior precision
                tol = "numeric",   # tolerance
                m.tot = "numeric", # num of undirected edges in A
                sig.hat= "numeric",# edge probability by model
                log.tot="numeric"  # total sum of log-prob
                ),
              methods=list(
                ## Validate input, set up initial parameters, and seed an
                ## initial hard membership assignment.  The Y argument is
                ## kept for interface compatibility but is not used.
                initialize = function( A, K, Y=NULL, tol=1e-3 )
                {
                  stopifnot( K > 0, dim(A)[1] == dim(A)[2], tol > 0 )
                  A <<- as(A, "dgCMatrix")
                  deg <<- colSums(A)
                  ## refuse zero-degree nodes: every node needs >= 1 edge
                  stopifnot( all(deg > 0) )

                  m.tot <<- sum( triu(A,1) )
                  K <<- K
                  n <<- dim(A)[1]
                  tol <<- tol

                  ## empirical edge probability, clamped away from {0,1}
                  sig.hat <<- min(1-tol, max(tol, m.tot / (n*(n-1)/2)))
                  log.tot <<- 0

                  ## initial block parameters: we look for K dense blocks
                  ## on a sparse background whose log-odds rho.0 match the
                  ## empirical edge density
                  rho.0 <- log( sig.hat ) - log( 1 - sig.hat )

                  rho <<- c(rep(2, K) + 0.1*runif(K), rho.0)
                  rho.sq <<- rho^2
                  gam <<- rep(tol, K+1)
                  gam.0 <<- rep(tol, K+1)
                  eta <<- rep(0, K+1)

                  ## latent membership
                  latent.seeding()
                },
                ## Iteratively reassign each node's soft block membership
                ## given the current block parameters, in random order,
                ## until the block occupancies n.k stabilize.
                latent.inference = function( n.iter = 1000, verbose=FALSE )
                {
                  n.k <- colSums(Y)

                  for( iter in 1:n.iter )
                    {
                      n.k.old <- n.k
                      for( i in sample(n,n) )
                        {
                          ## temporarily remove node i from the occupancies
                          n.k <- n.k - Y[i,]
                          Y[i,] <<- 0
                          adj <- which( A[,i] > 0 )
                          if( length(adj) == 0 ){ next }
                          ## drop=FALSE: a single neighbor must stay a
                          ## one-row matrix, or colSums would fail
                          d.k <- colSums( Y[adj, , drop=FALSE] )
                          ## no occupied neighbor blocks: leave i unassigned
                          if( sum(d.k) <= 0 ){ next }
                          valid <- which(d.k > 0)

                          ## log-odds of joining each occupied block:
                          ## neighbors in block minus expected background
                          log.mass <- rho[valid] * (d.k[valid] - sig.hat * n.k[valid])
                          Y[i, valid] <<- sig.vec(log.mass)
                          n.k <- n.k + Y[i,]
                        }
                      ## relative change of block occupancies
                      diff <- mean(abs(n.k - n.k.old)) / max(tol, mean(n.k.old))
                      verbose.msg( paste(" Latent Iter =", iter, ", Diff =", round(diff,4)), verbose)
                      if( diff < tol ){ break }
                    }
                  verbose.msg( " Done: latent variable inference", verbose )
                },
                ## Estimate the block parameters rho from a stochastically
                ## down-sampled pseudo training set (all edges of half the
                ## nodes, matched non-edge "holes" of the other half), then
                ## blend the resulting natural parameters into the running
                ## estimates with the given learning rate.
                block.inference = function( learn.rate, verbose=FALSE )
                {
                  ## random split: edge-contributing vs hole-contributing
                  u <- runif( n )
                  edge.set.node <- which( u >= 1/2 )
                  hole.set.node <- which( u < 1/2 )

                  ## lapply (not sapply): equal-length results must not
                  ## collapse into a matrix -- downstream code needs lists
                  s.edge <- lapply( edge.set.node, function(i) which(A[,i] > 0) )
                  c.edge <- rep(1/2, length(edge.set.node))

                  ## sample deg[i] holes per node, excluding i itself
                  ## (consistent with the n-1-deg[i] correction below);
                  ## sample.int avoids the sample(x) length-1 footgun and
                  ## min() guards very dense nodes with few holes
                  s.hole <- lapply( hole.set.node, function(i) {
                    cand <- setdiff( which( A[,i] < 1 ), i )
                    cand[ sample.int( length(cand), min(deg[i], length(cand)) ) ]
                  } )
                  c.hole <- deg[hole.set.node] / (n - 1 - deg[hole.set.node])

                  nodes <- c(edge.set.node, hole.set.node)
                  cor.fac <- c(c.edge, c.hole)
                  a.ij <- c(rep(1, length(edge.set.node)), rep(0, length(hole.set.node)))
                  pairs <- c(s.edge, s.hole)

                  ## linearize the training set into per-pair vectors
                  n.samples <- lengths( pairs )
                  i.idx <- rep( nodes, n.samples )
                  j.idx <- unlist( pairs )
                  c.ij <- rep( cor.fac, n.samples )  # sampling correction
                  a.ij <- rep( a.ij, n.samples )     # observed edge indicator

                  rho.hat <- rho

                  ## co-membership covariates for each sampled pair
                  Y.sub <- Y[ i.idx, ] * Y[ j.idx, ]
                  occupied <- which( colSums(Y.sub) > 0 )

                  for( iter in 1:10000 )
                    {
                      if( iter > 1 ) { log.mass.old <- log.mass }

                      ## sparse matrix-vector product replaces a dense
                      ## per-row apply; rho.hat[K+1] is the background term
                      log.mass <- as.numeric( Y.sub %*% rho.hat[1:K] ) + rho.hat[K+1]

                      ## local quadratic (IRLS-style) approximation
                      sig.ij <- sig.vec( log.mass )
                      r.ij <- sig.ij * (1 - sig.ij)
                      z.ij <- log.mass + (a.ij - sig.ij)/(tol + r.ij)

                      ## coordinate-wise ridge update over occupied blocks
                      for( k in occupied )
                        {
                          y <- Y.sub[,k]

                          log.mass <- log.mass - rho.hat[k] * y
                          num <- sum( r.ij/c.ij * (z.ij - log.mass) * y )
                          denom <- sum( r.ij/c.ij * (y^2) ) + (tol + gam[k])
                          rho.hat[k] <- num / denom

                          log.mass <- log.mass + rho.hat[k] * y
                        }

                      if( iter > 1 ){
                        diff <- mean( abs(log.mass - log.mass.old) )
                        verbose.msg( paste(" Block Iter =", iter, " Diff =", round(diff,4)), verbose )
                        if( diff < tol ){ break }
                      }
                    }

                  ## update gamma (posterior precision), floored at tol;
                  ## last entry is the background/intercept term
                  gam.stuff <- c( as.numeric( crossprod(Y.sub, r.ij/c.ij) ), sum(r.ij/c.ij) )
                  gam.stuff[ gam.stuff < tol ] <- tol
                  gam.new <- gam.0 + gam.stuff
                  gam <<- (1-learn.rate) * gam + learn.rate * gam.new

                  ## update eta (natural parameter = precision * mean)
                  eta.new <- (gam.0 + gam.stuff) * rho.hat
                  eta <<- (1-learn.rate) * eta + learn.rate * eta.new

                  ## recover posterior moments: rho = E[rho], rho.sq = E[rho^2]
                  rho <<- eta / gam
                  rho.sq <<- rho^2 + 1/gam

                  ## update total explained log-mass and the model-implied
                  ## edge probability
                  tmp <- as.numeric( Y.sub %*% rho.hat[1:K] ) + rho.hat[K+1]
                  log.tot.new <- sum( tmp / c.ij )
                  log.tot <<- (1-learn.rate) * log.tot + learn.rate * log.tot.new

                  sig.hat <<- sig.vec( log.tot )
                  verbose.msg( paste(" Done: block parameter estimation rate =", round(learn.rate,4)), verbose )
                },
                ## helper function: vectorized logistic sigmoid
                sig.vec = function( xv )
                {
                  return( 1 / (1 + exp(-xv)) )
                },
                ## Initialize hard memberships: randomly seed up to 2K
                ## nodes over the K blocks, then greedily assign the rest
                ## to their best-connected occupied block.
                latent.seeding = function( )
                {
                  rand.order <- sample(n, n)
                  m <- min(n, 2*K)
                  clust <- 1:min(K, m)
                  if( m > K ){ clust <- c(clust, 1:(m-K)) }

                  ## random seeding for the first m nodes
                  Y.rand <- spMatrix( nrow=n, ncol=K,
                                     i = rand.order[1:m],
                                     j = clust,
                                     x = rep(1, m) )
                  ## greedy seeding for the remaining nodes
                  if( n > m ) {
                    n.k <- colSums(Y.rand)

                    for( i in rand.order[-(1:m)] )
                      {
                        adj <- which( A[,i] > 0 )
                        if( length(adj) == 0 ){ next }
                        ## drop=FALSE keeps a one-row matrix for a single
                        ## neighbor, so colSums stays valid
                        d.k <- colSums( Y.rand[adj, , drop=FALSE] )
                        if( sum(d.k) <= 0 ){ next }
                        valid <- which(d.k > 0)

                        ## hard-assign to the highest-scoring block
                        log.mass <- rho[valid] * (d.k[valid] - sig.hat * n.k[valid])
                        k <- valid[which.max(log.mass)]
                        Y.rand[i, k] <- 1
                        n.k[k] <- n.k[k] + 1
                      }
                  }
                  Y <<- as(Y.rand, "dgCMatrix")
                },
                ## helper: tagged progress message, printed only if verbose
                verbose.msg = function( msg, verbose )
                {
                  if( verbose ){ cat(" [sMMSB] ",msg,"\n",sep="") }
                }
                )
              ) # end of class





## EOF
