/************************************************************/
/*      Group Lasso for the multinomial mixture model       */ 
/*              Author:  Wayne Zhang                        */
/*            actuary_zhang@hotmail.com                     */
/************************************************************/

/**
 * @file glmm.c
 * @brief Group Lasso for the multinomial mixture model
 * @author Wayne Zhang                         
 */

#include <R.h>
#include <Rmath.h>
#include "rmm.h"  
#include "utilities.h"
#include "nrmm.h"

/**
 * Compute the max value of lambda for which all coefficients are zero
 * 
 * @param X input sparse design matrix (assuming X'X = nO) 
 * @param y response
 * @param grp an integer vector (of length nG + 1) indicating the grouping structure of X
 * @param nG the number of groups in X
 *
 * @return the max lambda
 *
 */
double rmm_get_maxLam(SEXP X, double *y, int *grp, int nG){
  int *dim = SLOT_INT_NULL(X, "Dim"),
    *xp = SP_SLOT(X),
    *xi = SI_SLOT(X);
  int nO = dim[0], nB = dim[1];
  double *xx = SX_SLOT(X);
  double *lam = Alloca(nG, double),
    *beta = Alloca(nB, double);
  R_CheckStack();

  AZERO(beta, nB);
  for (int k = 0; k < nG; k++){
    int gsize = grp[k + 1] - grp[k];
    /* beta_j := (X'y)_j / nO for every column j in group k */
    for (int j = grp[k]; j < grp[k + 1]; j++){
      double dot = 0;
      for (int p = xp[j]; p < xp[j + 1]; p++)
	dot += xx[p] * y[xi[p]];
      beta[j] = dot / nO;
    }
    /* lambda_k = ||X_k'y/nO|| / sqrt(group size) */
    lam[k] = sqrt(sqr_length(beta + grp[k], gsize) / gsize);
  }
  return dmax(lam, nG);                                        /* largest group-wise threshold */
}


/**
 * Update the negative penalized loglikelihood
 * 
 * @param ans an object of class "glmm"
 *
 */
/*
void rmm_update_plik(SEXP ans){
  int *dims = DIMS_SLOT(ans), *grp = GVAR_SLOT(ans),
    *ncols = NVAR_SLOT(ans);
  int nG = dims[nG_POS];
  double *beta = BETA_SLOT(ans), *lam = LAM_SLOT(ans), 
    *pfct = PFCT_SLOT(ans), *ctrl = CTRL_SLOT(ans);
  int wL = (int) ctrl[wlam_POS];

  rmm_update_llik(ans);  
  ctrl[plik_POS] = 0;
  for (int i = 0; i < nG; i++){
    if (pfct[i] != 0)
      ctrl[plik_POS] += sqrt(ncols[i] * sqr_length(beta + grp[i], ncols[i]));
  }
  ctrl[plik_POS] *= lam[wL];
  ctrl[plik_POS] += ctrl[llik_POS];
}
*/


/**
 * The main work horse for implementing the block-wise coordinate descent 
 * algorithm for the group lasso path with sparse design matrix. 
 * This is only computing the estimate for one specific lambda. 
 * 
 * @param X sparse design matrix 
 * @param y response
 * @param r vector of residuals (no initialization)
 * @param beta beta vector of length nB. The input values are used 
 * as initial values.
 * @param grp an integer vector (of length nG + 1) indicating the grouping structure of X
 * @param ncols an integer vector (of length nG) indicating the number of columns each 
 group corresponds to
 * @param nG the number of groups in X
 * @param lam the value of the tuning parameter lambda
 * @param pfct the vector of penalty factors (length nG). 
 * For the unpenalized term, e.g., the intercept, the penalty should be zero. 
 * For other terms, it is usually 1. 
 *
 * @note the values of beta are used as initial estimates. At output, it stores 
 * the new estimates. 
 *
 * @return 0 means successful and 1 means iteration limits reached
 *
 */

int do_glasso(SEXP X, double *y, double *r, double *beta, 
	      int *grp, int *ncols, int nG, 
	      double lam, double *pfct){
  /* X is in compressed-sparse-column layout: xp = column pointers,
   * xi = row indices, xx = nonzero values. */
  int *dim = SLOT_INT_NULL(X, "Dim"); 
  int nO = dim[0], nB = dim[1], iter; 
  int *xp = SP_SLOT(X), *xi = SI_SLOT(X);
  double *xx = SX_SLOT(X);
  double *beta_old = Alloca(nB, double),
    *plam = Alloca(nG, double);  
  double z0; 
  R_CheckStack();

  // initialize the residual r 
  // r := y - X * beta, using the caller-supplied beta as the starting point
  Memcpy(r, y, nO);
  for (int j = 0; j < nB; j++){
    for (int p = xp[j]; p < xp[j + 1]; p++)
      r[xi[p]] -= xx[p] * beta[j];
  }      
  
  // per-group thresholds; pfct[i] == 0 leaves group i unpenalized
  for (int i = 0; i < nG; i++) 
    plam[i] = lam * pfct[i] * sqrt(ncols[i]);                    // update threshold

  // run block-wise coordinate descent     
  for (iter = 0; iter < RMM_CDITER; iter++){
    R_CheckUserInterrupt();
    Memcpy(beta_old, beta, nB);      

    for (int i = 0; i < nG; i++){      
	
      // add group i's contribution back so r becomes the partial residual r(-k)
      for (int j = grp[i]; j < grp[i + 1]; j++){                 // r(-k) := r + x_k * beta_k
	if (beta[j] != 0){
	  for (int p = xp[j]; p < xp[j + 1]; p++)                  
	    r[xi[p]] +=  xx[p] * beta[j];
	}
      }

      // unthresholded group update (assumes X_k'X_k = nO * I; see file header)
      AZERO(beta + grp[i], ncols[i]);
      for (int j = grp[i]; j < grp[i + 1]; j++){                 // b := t(x_k) * r(-k)
	for (int p = xp[j]; p < xp[j + 1]; p++)                  
	  beta[j] +=  xx[p] * r[xi[p]];
	beta[j] /= nO;
      }
	
      // group soft-thresholding; z0 == 0 means the whole group is shrunk to zero
      z0 = TH_glasso(beta + grp[i], ncols[i], plam[i]);          // update b using thresholding

      if (z0 != 0){
	for (int j = grp[i]; j < grp[i + 1]; j++){               // r := r(-k) - x_k * beta_k
	  for (int p = xp[j]; p < xp[j + 1]; p++)                  
	    r[xi[p]] -=  xx[p] * beta[j];
	}
      }
    }

    // stop when the L1 change in beta over one full sweep is small enough
    if (dist(beta, beta_old, nB, 1) < RMM_EPS) 
      break; 
  }

  // 1 = iteration limit reached without convergence, 0 = converged
  if (iter == RMM_CDITER) return(1);
  return(0);

}


/**
 * Main work horse for estimating the non-mixture multinomial group lasso model. 
 * This function computes the solution for one particular lambda. 
 *
 * @param ans an object of class "glmm"
 * @param beta if not null, its value is used as initial values, and at output, 
 * it stores the optimal values. If null, the beta slot of ans is used.
 * @param lam the penalty parameter. Must not be null. 
 *
 * @return 0 means successful and 1 means iteration limits reached
 *
 */
int do_glm(SEXP ans, double *beta, double lam){
  int *dims = DIMS_SLOT(ans), *grp = GVAR_SLOT(ans),
    *ncols = NVAR_SLOT(ans);
  int nO = dims[nO_POS], nB = dims[nB_POS],
    nG = dims[nG_POS];
  double *y = Y_SLOT(ans),  
    *eta = ETA_SLOT(ans), *mu = MU_SLOT(ans), 
    *r = RESID_SLOT(ans), *wts = WTS_SLOT(ans),
    *ctrl = CTRL_SLOT(ans),
    *pfct = PFCT_SLOT(ans);
  if (beta == NULL)
    beta = BETA_SLOT(ans);
  double *beta_old = Alloca(nB, double),
    *theta_old = Alloca(nB, double),
    *theta_new = Alloca(nB, double),
    *yn = Calloc(nO, double);
  R_CheckStack();
  double t = 0, w = dmax(wts, nO);
  SEXP X = X_SLOT(ans);
  // FIX: take the limit as an int once, so the exit test below is an exact
  // int comparison instead of int == double (which misfires for
  // non-integral control values)
  int maxit = (int) ctrl[MMit_POS];
  int iter;

  // FIX: beta_old was previously read uninitialized by the first
  // convergence test; seed it with the initial beta
  Memcpy(beta_old, beta, nB);
  Memcpy(theta_old, beta, nB);
  for (iter = 0; iter < maxit; iter++){
    rmm_update_prob(ans, beta);
    for (int i = 0; i < nO; i++)	
      r[i] = mu[i] * (1 - mu[i]); 
    t = 2 * dmax(r, nO) * w;                            // t := 2 * max(p * (1 - p)) * max(wts)
    for (int i = 0; i < nO; i++)                        // yn := w(y - mu)/t + eta, working response
      yn[i] = wts[i] * (y[i] - mu[i])/t + eta[i];        

    // solve the penalized weighted least-squares subproblem
    ctrl[cvg_POS] = do_glasso(X, yn, r, beta, grp, ncols, 
			      nG, lam/t, pfct);     

    t = (iter + 1.0)/(iter + 4.0);                      // apply the Nesterov step
    Memcpy(theta_new, beta, nB);                        // theta_new stores the new beta from the glasso              
    for (int j = 0; j < nB; j++)                        // beta := theta_new + l/(l + 3) * (theta_new - theta_old)
      beta[j] += t * (theta_new[j] - theta_old[j]);

    if (dist(beta, beta_old, nB, 1) < ctrl[eps_POS]) 
      break; 
    Memcpy(beta_old, beta, nB);
    Memcpy(theta_old, theta_new, nB);    
  }
  
  Free(yn);
  // 1 = MM iteration limit reached, 0 = converged
  if (iter == maxit) return(1);
  return(0);
}




/**
 * Main work horse for estimating the mixture multinomial group lasso model. 
 * This function computes the solution for one particular lambda
 * that is indicated in ctrl[wlam_POS]. 
 *
 * @param ans an object of class "glmm"
 * @param beta a pointer to the beta vector whose value is used as initial values, 
 * and at output, it stores the optimal values. Must not be null.
 * @param pi the mixture probability. If not null, use it as the initial values 
 * and to store the optimal ones. If null, the pi slot of ans is used.  
 * @param lam the penalty parameter. Must not be null. 
 *
 * @return 0 means successful and 1 means iteration limits reached
 *
 */
int do_glmm(SEXP ans, double *beta, double *pi, double lam){
  int *dims = DIMS_SLOT(ans), *sub = SUB_SLOT(ans);
  int nO = dims[nO_POS], nB = dims[nB_POS],
    nS = dims[nS_POS], nI = dims[nI_POS];
  int nBS = nB * nS;  // length of the beta vector
  double *wts = WTS_SLOT(ans), *ctrl = CTRL_SLOT(ans);
  double *W = Calloc(nI * nS, double);                   // nI by nS matrix of posteriors 
  double *beta_old = Alloca(nBS, double);
  R_CheckStack();
  // FIX: take the limit as an int once so the exit test is an exact int
  // comparison (the old code compared int == double)
  int emax = (int) ctrl[EMit_POS];
  // FIX: iter was read uninitialized after the non-mixture branch; track
  // the return status explicitly instead
  int iter = 0, status = 0;
  Memcpy(beta_old, beta, nBS);

  if (nS == 1){ /* non-mixture group lasso model */
    status = do_glm(ans, beta, lam);
  } else {      /* mixture model: EM iterations */
    for (iter = 0; iter < emax; iter++){
      // E-step: W[i][k] = posterior prob that individual i is in segment k
      rmm_update_estep(ans, beta, pi, W);

      // M-step 
      // 1) update pi                                     // pi = colMeans(W)
      sum_mat(nI, nS, 2, W, pi);
      for (int k = 0; k < nS; k++)
	pi[k] /= nI;

      // 2) update beta: weighted group-lasso fit per segment, with the
      //    penalty scaled by pi[k]^tau
      for (int k = 0; k < nS; k++){
	for (int i = 0; i < nO; i++)
	  wts[i] = W[sub[i] + k * nI];
	do_glm(ans, beta + nB * k, pow(pi[k], ctrl[tau_POS]) * lam);
      }

      if (dist(beta, beta_old, nBS, 1) < ctrl[eps_POS]) 
	break; 
      Memcpy(beta_old, beta, nBS);
    }
    if (iter == emax) status = 1;   // EM iteration limit reached
  }
  Free(W);
  return status;
}


/**
 * R callable function that performs the group lasso for the mixture
 * multinomial model. It estimates the models for a grid of values 
 * specified in the lambda slot. 
 * 
 * @param ans an object of class "glmm"
 *
 */

SEXP R_do_glmm(SEXP ans){
  int *dims = DIMS_SLOT(ans);
  int nB = dims[nB_POS], nL = dims[nL_POS],
    nS = dims[nS_POS];
  double *beta = BETA_SLOT(ans), *pi = PI_SLOT(ans),
    *ctrl = CTRL_SLOT(ans), *lam = LAM_SLOT(ans);
  int nBS = nB * nS;

  /* fit each lambda in turn, warm-starting the next fit from the
   * current solution; cvg accumulates non-convergence flags */
  for (int l = 0; l < nL; l++){
    if (ctrl[verb_POS])
      Rprintf("Lambda = %2.5f \n", lam[l]);
    ctrl[cvg_POS] += do_glmm(ans, beta, pi, lam[l]);
    if (l + 1 < nL){
      /* copy the solution into the slots for lambda l+1, then advance */
      Memcpy(beta + nBS, beta, nBS);
      Memcpy(pi + nS, pi, nS);
      beta += nBS;
      pi += nS;
    }
  }
  return R_NilValue;
}



/**
 * Compute the loglikelihood, df, AIC and BIC for an estimated multinomial mixture model. 
 * 
 * @param ans an object of class "mm"
 *
 * @return a list of four elements: loglikelihood, degree of freedom, AIC and BIC 
 */
SEXP R_rmm_BIC(SEXP ans){
  int *dims = DIMS_SLOT(ans), *sub = SUB_SLOT(ans),
    *yi = YI_SLOT(ans);
  int nO = dims[nO_POS], nB = dims[nB_POS], 
    nL = dims[nL_POS], nS = dims[nS_POS], 
    nP = dims[nP_POS], nI = dims[nI_POS];
  int nBS = nB * nS, pos;
  double *beta = BETA_SLOT(ans), *pi = PI_SLOT(ans),
    *mu = MU_SLOT(ans), *ctrl = CTRL_SLOT(ans);
  // NOTE(review): nO and ctrl are unused in this function
  SEXP out = allocVector(VECSXP, 4);
  PROTECT(out);  
  SET_VECTOR_ELT(out, 0, allocVector(REALSXP, nL));                   // loglik
  SET_VECTOR_ELT(out, 1, allocVector(INTSXP, nL));                    // # nzero
  SET_VECTOR_ELT(out, 2, allocVector(REALSXP, nL));                   // AIC
  SET_VECTOR_ELT(out, 3, allocVector(REALSXP, nL));                   // BIC

  double *llik = REAL(VECTOR_ELT(out, 0)),                   
    *AIC = REAL(VECTOR_ELT(out, 2)),
    *BIC = REAL(VECTOR_ELT(out, 3));
  int *nzero = INTEGER(VECTOR_ELT(out, 1));
  double d, ws;
  double *W = Calloc(nI * nS, double);
  AZERO(llik, nL);

  // one pass per lambda; beta and pi are advanced to the next solution
  // at the bottom of the loop
  for (int l = 0; l < nL; l++){

    AZERO(W, nI * nS);
    // accumulate each individual's log-density under each segment k
    for (int k = 0; k < nS; k++){
      rmm_update_prob(ans, beta + nB * k);              // update prob using beta from the kth segment
      for (int i = 0; i < nP; i++)                      // llik for the ith individual
	W[sub[yi[i]] + nI * k] += (mu[yi[i]] > 0) ? log(mu[yi[i]]) : 0; 
      for (int i = 0; i < nI; i++)                      // llik + log(pi)
	W[i + nI * k] += log(pi[k]);
    }
    
    // log-sum-exp over segments: llik_i = max_k W_ik + log(sum_k exp(W_ik - max))
    for (int i = 0; i < nI; i++){
      d = W[i];                                         // find the max of each row
      for (int k = 0; k < nS; k++){
	if (W[i + nI * k] > d)
	  d = W[i + nI * k];
      }
      ws = 0;
      for (int k = 0; k < nS; k++){                     // W_ij = exp( W_ij - max_j W_ij)
	pos = i + nI * k;
	W[pos] = exp(W[pos] - d);
	ws += W[pos];                                   // ws is the row sum
      }
      llik[l] += d + log(ws);                           // ith contribution to loglikelihood
    }
    // df = (nS - 1) mixture probs + nonzero coefficients
    nzero[l] = nS - 1 + nnzero(beta, nBS);
    AIC[l] = -2 * llik[l] + nzero[l] * 2;
    BIC[l] = -2 * llik[l] + nzero[l] * log(nP);
    beta += nBS;
    pi += nS;
  }  
  UNPROTECT(1);
  Free(W);
  return out;
}






/*=======================================
 *    R callable utility functions      * 
 =======================================*/

/**
 * R callable function for rmm_get_maxLam
 * 
 * @param X input sparse design matrix 
 * @param y response
 * @param GRP an integer vector (of length nG + 1) indicating the grouping structure of X
 *
 */
SEXP R_rmm_get_maxLam(SEXP X, SEXP y, SEXP GRP){
  /* GRP holds nG + 1 group boundaries, so there are LENGTH(GRP) - 1 groups */
  int nG = LENGTH(GRP) - 1;
  return ScalarReal(rmm_get_maxLam(X, REAL(y), INTEGER(GRP), nG));
}


/**
 * R callable version of mnl_update_plik
 * 
 * @param ans an object of class "glasso"
 *
 */
/*
SEXP R_rmm_update_plik(SEXP ans){
  rmm_update_plik(ans);
  return R_NilValue;
}
*/


/**
 * R callable function that performs the group lasso for Gaussian models. 
 * 
 * @param ans an object of class "glasso"
 *
 */
SEXP R_glnorm(SEXP ans){
  int *dims = DIMS_SLOT(ans), *grp = GVAR_SLOT(ans),
    *ncols = NVAR_SLOT(ans);
  int nB = dims[nB_POS], nG = dims[nG_POS],
    nL = dims[nL_POS];
  double *y = Y_SLOT(ans), *r = RESID_SLOT(ans),
    *beta = BETA_SLOT(ans), *ctrl = CTRL_SLOT(ans),
    *lam = LAM_SLOT(ans), *pfct = PFCT_SLOT(ans);
  SEXP X = X_SLOT(ans);

  /* solve the group lasso at each lambda, warm-starting the next
   * solution from the current one */
  for (int l = 0; l < nL; l++){
    do_glasso(X, y, r, beta, grp, ncols, nG, lam[l], pfct);
    if (l + 1 < nL){
      Memcpy(beta + nB, beta, nB);   /* seed the next lambda's slot */
      beta += nB;
    }
  }
  return R_NilValue;
}

/**
 * R callable function that performs the group lasso for the mnl model. 
 * 
 * @param ans an object of class "glmm"
 *
 */

SEXP R_glm(SEXP ans){
  int *dims = DIMS_SLOT(ans);
  int nB = dims[nB_POS], nL = dims[nL_POS];
  double *beta = BETA_SLOT(ans), *ctrl = CTRL_SLOT(ans),
    *lam = LAM_SLOT(ans);

  /* fit each lambda in turn; cvg accumulates non-convergence flags */
  for (int l = 0; l < nL; l++){
    ctrl[cvg_POS] += do_glm(ans, beta, lam[l]);
    if (l + 1 < nL){
      /* warm start: copy the solution into the next lambda's slot */
      Memcpy(beta + nB, beta, nB);
      beta += nB;
    }
  }
  return R_NilValue;
}


void rmm_init_estep(double *W, int nI, int nS){
  int *draw = Alloca(nS, int);
  double *prob = Alloca(nS, double);
  /* normalizer so each row of W sums to 1: 0.9 + 0.1 * (nS - 1) */
  double nm = 0.9 + 0.1 * (nS - 1);
  GetRNGstate();
  for (int i = 0; i < nI; i++){
    /* draw one segment uniformly at random for individual i */
    for (int k = 0; k < nS; k++)
      prob[k] = 1.0/nS;
    rmultinom(1, prob, nS, draw);
    /* the drawn segment gets posterior weight 0.9/nm, the rest 0.1/nm */
    for (int k = 0; k < nS; k++)
      W[nI * k + i] = (draw[k] == 0) ? 0.1/nm : 0.9/nm;
  }
  PutRNGstate();
}


SEXP R_rmm_init_estep(SEXP ans){
  int *dims = DIMS_SLOT(ans);
  int nS = dims[nS_POS], nI = dims[nI_POS];

  /* nI x nS matrix of randomized initial posterior weights */
  SEXP W = PROTECT(allocVector(REALSXP, nI * nS));
  rmm_init_estep(REAL(W), nI, nS);
  UNPROTECT(1);
  return W;
}
