#define  _USE_MATH_DEFINES
#include <math.h>
#include <algorithm>
#include <limits>
#include <iomanip>

#include "logging.h"
#include "Compiler.h"
#include "Matrix.h"
#include "BayesParamManager.h"
#include "IndividualPriors.h"
#include "Design.h"
#include "PolyZO.h"
#include "ModelTypeParam.h"
#include "RowSetStats.h"
#include "ModelFile.h"
#include "Likelihood.h"
#include "OutputFormat.h"
#include "Data.h"
#include "asa239.h"

using namespace std;
int NUM_ROW;
int NUM_COL;
double elapsedTime = 0;

// Standard normal density N(0,1): (1/sqrt(2*pi)) * exp(-x^2/2).
// Fix: use M_PI (available via _USE_MATH_DEFINES + math.h at the top of this
// file) instead of the truncated literal 3.1415926, which lost accuracy in
// the 8th significant digit.
double pdfNorm(double x) 
{
	return 1.0/sqrt(2.0*M_PI) * exp(-0.5*x*x);
}

// Standard normal CDF, computed via the Abramowitz & Stegun 7.1.26
// rational approximation of erf (maximum absolute error ~1.5e-7).
double phi(double x)
{
    // A&S 7.1.26 coefficients
    const double a1 =  0.254829592;
    const double a2 = -0.284496736;
    const double a3 =  1.421413741;
    const double a4 = -1.453152027;
    const double a5 =  1.061405429;
    const double p  =  0.3275911;

    // work on |x|/sqrt(2) and restore the sign at the end
    const int sign = (x < 0) ? -1 : 1;
    const double z = fabs(x)/sqrt(2.0);

    // Horner evaluation of the degree-5 polynomial in t
    const double t = 1.0/(1.0 + p*z);
    const double poly = ((((a5*t + a4)*t + a3)*t + a2)*t + a1)*t;
    const double y = 1.0 - poly*exp(-z*z);

    return 0.5*(1.0 + sign*y);
}

void testPhi()
{
    // Select a few input values
    double x[] = 
    {
        -3, 
        -1, 
        0.0, 
        0.5, 
        2.1 
    };

    // Output computed by Mathematica
    // y = Phi[x]
    double y[] = 
    { 
        0.00134989803163, 
        0.158655253931, 
        0.5, 
        0.691462461274, 
        0.982135579437 
    };

   	int numTests = sizeof(x)/sizeof(double);

    double maxError = 0.0;
    for (int i = 0; i < numTests; ++i)
    {
        double error = fabs(y[i] - phi(x[i]));
        if (error > maxError)
            maxError = error;

	double h = pdfNorm(x[i])/(1-phi(x[i]));
	cout << "x=" << x[i] << ", h=" << h << endl;
    }

        std::cout << "Maximum error: " << maxError << "\n";
} 

//test incomplete gamma function
// Sanity checks for the gamma helpers: tgamma against the asa239 derivative
// dgamma, then GAMMAD (the incomplete gamma ratio) against tabulated values
// supplied by gamma_inc_values().
void test01 ( )
{
	double g = tgamma(0.5);
	cout << "gamma(0.5)=" << g << ", dgamma(0.5)=" << dgamma(0.5) << endl;

	g = tgamma(1.5);
	cout << "gamma(1.5)=" << g << ", dgamma(1.5)=" << dgamma(1.5) << endl;

  double a;
  double fx;
  double fx2;
  int ifault;
  int nData;
  double x;

	cout << "incomplete gamma(11.1, 9)=" << gammad(11.1, 9, &ifault) << endl;

  cout << "\n";
  cout << "TEST01:\n";
  cout << "  GAMMAD computes the incomplete Gamma function.\n";
  cout << "  Compare to tabulated values.\n";
  cout << "\n";
  cout << "             A             X      "
       << "    FX                        FX2\n";
  cout << "                                  "
       << "    (Tabulated)               (GAMMAD)            DIFF\n";
  cout << "\n";

  nData = 0;

  // gamma_inc_values() walks its internal table; nData==0 signals exhaustion
  while (true)
  {
    gamma_inc_values ( &nData, &a, &x, &fx );
    if ( nData == 0 )
      break;

    fx2 = gammad ( x, a, &ifault );

    cout << "  " << setprecision(4) << setw(12) << a
         << "  " << setprecision(4) << setw(12) << x
         << "  " << setprecision(16) << setw(24) << fx
         << "  " << setprecision(16) << setw(24) << fx2
         << "  " << setprecision(4) << setw(10) << fabs ( fx - fx2 ) << "\n";
  }
}

//JQC: One (j) coordinate move for Cox regression for both priors 
// Tentative Newton step pieces for one coefficient (coordinate j) of a
// parametric survival model, accumulated over the nonzero design entries
// in `range` (inverted-index pairs of (case #, x_ij)).
//   wTX:        current linear predictor beta'X per case
//   trustregion: step bound for this coordinate (currently unused here;
//                clamping is done by the caller)
//   lifeTime/censorInd: survival response (censorInd[i]==1 -> event observed)
//   modelIndex: 1=exponential, 2=Weibull, 3=log-logistic, 4=log-normal, 5=gamma
//   alpha:      shape parameter of the chosen model
// Returns pair( -numerator, denominator ) of the coordinate step.
// Perf fix: lifeTime/censorInd are now taken by const reference - they were
// copied on every coordinate step of every iteration.
pair<double,double>  //numer, denom
OneCoordStep(
    const InvData::pair_range_type range,
    const Vector& wTX,
    double trustregion,
    const vector<double>& lifeTime,
    const vector<int>& censorInd,
    unsigned modelIndex,
    double alpha
    )
{  
	double num = 0;
	double den = 0;
	for( InvData::pair_vector_type::const_iterator ix=range.first; ix!=range.second; ix++ )
	{
		unsigned i = ix->first;		//case #
		double x_ij = ix->second;	//value

		double y = lifeTime[i];					//exponential, gamma
		if (modelIndex==2 || modelIndex==3) {			//Weibull, Log-logistic
			y = pow(lifeTime[i], alpha);
		}
		else if (modelIndex==4) {				//log-normal
			y = log(lifeTime[i]);
		}

		double exp_r_i = exp(-wTX[i]);				//exp(-beta * X_i)
		// NOTE: the old exp_delta_r_i = exp(trustregion*fabs(x_ij)) was only
		// referenced by commented-out code; dropped to save one exp() per entry.

		if (modelIndex==1 || modelIndex==2) {
			num += x_ij * censorInd[i] - x_ij * y * exp_r_i;
			den += x_ij * x_ij * y * exp_r_i;
		}
		else if (modelIndex==3) {
			num += x_ij * censorInd[i] - (1+censorInd[i])*x_ij*exp_r_i*y/(1+y*exp_r_i);
			den += (1+censorInd[i])*y*exp_r_i* x_ij*x_ij/((1+y*exp_r_i)*(1+y*exp_r_i));
		}
		else if (modelIndex==4) {
			// z: standardized log-time; h: hazard-like ratio pdf/(1-cdf)
			double z = (log(lifeTime[i]) - wTX[i])/alpha;
			double h = pdfNorm(z)/(1-phi(z));
			
			if (phi(z) !=1) {		//if equals 1, the term will not enter in the likelihood
				num += - x_ij*(censorInd[i]*z+(1-censorInd[i])*h)/alpha;
				den += x_ij*x_ij*(censorInd[i]-(1-censorInd[i])*h*(z-h))/(alpha*alpha);
			}
		}
		else if (modelIndex==5) {
			// gamma model: needs the incomplete gamma ratio (gammad) and its
			// derivatives dIG/ddIG from asa239
			int ifault;
			double tmpArg = y*exp(-wTX[i]);
			double tmpIG = 1-gammad(tmpArg, alpha, &ifault);
			num += alpha*censorInd[i]*x_ij - censorInd[i]*x_ij*tmpArg - (1-censorInd[i])*dIG(alpha, tmpArg, 2)*tmpArg*x_ij/tmpIG;
			den += censorInd[i]*x_ij*x_ij*tmpArg
				+(1-censorInd[i])*dIG(alpha, tmpArg, 2)*tmpArg*x_ij*x_ij/tmpIG
				+(1-censorInd[i])*ddIG(alpha, tmpArg, 2)*tmpArg*tmpArg*x_ij*x_ij/tmpIG
				+(1-censorInd[i])*dIG(alpha, tmpArg, 2)*dIG(alpha, tmpArg, 2)*tmpArg*tmpArg*x_ij*x_ij/(tmpIG*tmpIG);
		}
	}

	return pair<double,double>( -num, den );		//numerator + denominator
}

// JQC: "wakeup plan" for Laplace Prior (What was it for?)
// 	From D Lewis:
//		If a coefficient equals its prior mode, there's a good chance that that is the value it should have 
//		in the final fitted solution.  So we start skipping updates of those coefficients, 
//		and each time we find the coefficient is still equal to the mode, we increase the number of iterations 
//		for which we skip it. 
// 	
// Convenience overload: extract the survival response (life times and
// censoring indicators) from the row set, then delegate to the full fitter.
void LRModel::ZOLRBinary(
    MemRowSetIterator& rowSet,
    const InvData& invData, //input - generates design data
    const BayesParameter& bayesParam, //input
    vector<bool>& fixedParams,
    vector<double>& beta,
    int modelIndex
    )
{
	MemRowSet rows = rowSet.getRowSet();
	vector<double> times = rows.getLifeTime();
	vector<int> events = rows.getCensorInd();
	ZOLRBinary(rowSet, invData, bayesParam, fixedParams, beta, times, events, modelIndex); 
}

// Coordinate-descent ("ZO"-style) fitter for parametric survival models with
// a Normal or Laplace prior on the coefficients.
//   modelIndex: 1=exponential, 2=Weibull, 3=log-logistic, 4=log-normal, 5=gamma
//   beta:       coefficient vector, updated in place
//   lifeTime/censorInd: survival response (censorInd[i]==1 -> event observed)
// Side effects: writes the file-scope globals NUM_ROW/NUM_COL and the members
// m_penalty, m_priorMode/m_priorSkew (when individual priors are present) and
// m_alpha (the shape parameter, updated by one Newton step per outer iteration).
void LRModel::ZOLRBinary(
    MemRowSetIterator& rowSet,
    const InvData& invData, //input - generates design data
    const BayesParameter& bayesParam, //input
    vector<bool>& fixedParams,
    vector<double>& beta,
    vector<double> lifeTime,
    vector<int> censorInd,
    int modelIndex
    )
{
	cout << "...Starting ZOLRBinary()... modelIndex=" << modelIndex << endl;
	//for (int i=0; i < lifeTime.size(); i++) {
	//	cout << "lifeTime[ " << i << "]=" << lifeTime[i] << ", censorInd= " << censorInd[i] << endl;
	//}
    	unsigned nDesignVars = rowSet.dim();		//JQC number of features
    	//unsigned nRows = invData.n();			//number of subjects - in training  
    	unsigned nRows = lifeTime.size();		//number of subjects - in training  
	// empty-statement idiom: the match branch does nothing; mismatch throws
    	if( nDesignVars==m_priorMode.numFeatures() ) ;//ok
    	else    throw DimensionConflict(__FILE__,__LINE__);

    	cout << "nDesignVars=" << nDesignVars << ", nRows=" << nRows << endl; 
	// publish dimensions through the file-scope globals
	NUM_ROW = nRows;
	NUM_COL = nDesignVars;

	// convert the common prior variance into the penalty parameter:
	// Normal -> 1/(2*var); Laplace -> lambda = sqrt(2/var)
    	double common_variance = bayesParam.getPriorVar(); 
    	cout << " common_variance = " << common_variance << endl;
    	double p;
    	if( Normal==bayesParam.getPriorType() ) {
		// analogous to 1/(2*tau_j) from Equation 8 of GLM2007
		p = 1 / (2 *common_variance);
    	}
    	else if( Laplace==bayesParam.getPriorType() ) {
		// analogous to lambda_j from Equation 9 of GLM2007
		double lambda = sqrt(2.0) / sqrt(common_variance); 
		p = lambda;
    	}
    	else
		throw runtime_error("ZO only allows Normal or Laplace prior");

    	//convert prior scale into penalty parameter
    	m_penalty.reset(nDesignVars, 1, p);

	// per-feature prior overrides from the individual-priors file, if any
    	if(m_individualPriorsHolder!=0) {
	    	cout << "take care of m_individualPriors ..." << endl;
		m_individualPriorsHolder->reset();

		for(unsigned j=0; j<nDesignVars; j++) {
	    		double final_variance;
	    
	    		// type: 0 - not found; 1 - feature-level; 2 - coefficient-level; 
	    		int type;
	    		// NOTE: this local `p` (ModeVarSkew) shadows the outer double `p`
	    		ModeVarSkew p = m_individualPriorsHolder->hasIndPrior(j,0,type);

	    		// if not found, continue
	    		if(type==0) continue;
	    
	    		// if the prior is specified in indprior file, overwrite the current value
	    		m_priorMode(j,0) = p.mode;
	    		m_priorSkew(j,0) = p.skew;
	    
	    		// NOTE: the type==0 arm is unreachable here (handled by the
	    		// `continue` above); p.abs selects absolute vs relative variance
	    		final_variance = type==0 ? common_variance : p.abs ? p.var : common_variance*p.var;
	    
	    		// penalty(j,k) is multiplier of the beta portion of the actual penalty
	    		if( Normal==bayesParam.getPriorType() ) {
				// analogous to 1/(2*tau_j) from Equation 8 of GLM2007
				m_penalty(j,0) = 1 / (2 * final_variance); //'2*' fixes bug, ver2.01
	    		}
	    		else if( Laplace==bayesParam.getPriorType() ) {
				// analogous to lambda_j from Equation 8 of GLM2007
				double lambda = sqrt(2.0) / sqrt(final_variance); 
				m_penalty(j,0) = lambda; 
	    		}
	    		else
				throw runtime_error("ZO only allows Normal or Laplace prior");
		}
    	}

    
    	//Vector trustregion( 1.0, nDesignVars );
    	// per-coordinate step bounds, shrunk/grown after each update
    	Vector trustregion( 1.0, nDesignVars );
    	unsigned ndropped;
  
    	// sleep[j] counts how long coefficient j has sat at its prior mode.
    	// NOTE(review): sleep[j] is never incremented in this version, so
    	// wakeUpPlan(0)==true always and the SLEEP branch below appears dead
    	// - confirm against the original CLG/ZO implementation.
      	vector<unsigned> sleep( nDesignVars, 0 );
    	// wake-up schedule: true at perfect squares up to `size`, then every
    	// 100th step beyond the precomputed plan
    	class WakeUpPlan {
        	vector<bool> plan;        
    	public:
        	bool operator()(unsigned int step ) {
            		if( step < plan.size() ) 
				return plan.at(step);
            		else 
				return 0==step%100;
        	}
        	WakeUpPlan(unsigned size=1000) : plan( size+1, false ) { //ctor
            		for(unsigned i=0;i*i<size;i++) plan[i*i] = true;     
		}
    	};
    	WakeUpPlan wakeUpPlan;	

    	unsigned iterLimit = m_modelType.getIterLimit();		//JQC: command line option
    	double thrConverge = m_modelType.getConvergenceThreshold();	//JQC: use this instead  
	
	m_alpha = m_modelType.getAlphaInit();
	cout << "init alpha=" << m_alpha << endl;
	//m_alpha = 0.5*sqrt(( sumLTsq - sumLT*sumLT/lifeTime.size() )/lifeTime.size());
	//m_alpha=0.353;
    
	time_t t1 = time(NULL);

	// d = number of observed (uncensored) events; used in the alpha updates
	double d=0;
	//don't know if this is necessary
	//for (int i=0; i < NUM_ROW; i++) 	
		//score[i]=0;
	for (int i=0; i < nRows; i++) { 
		d += censorInd[i];
	}
	time_t t2 = time(NULL);
	cout << " spent " << t2 - t1 << " to get design matrix..." << endl;

       	time_t t3 = time(NULL);	
    	int n1coordsteps=0, ndoubltries=0, nslept=0;

	// initialize the linear predictor wTX = X*beta from any warm-start beta
    	Vector wTX( 0.0, nRows );
    	for( unsigned j = 0; j < nDesignVars; j++ ) {
        	if( beta[j] != 0.0 ) {
	    		const InvData::pair_range_type range = invData.getRange(j);
	    		for( InvData::pair_vector_type::const_iterator ix=range.first; ix!=range.second; ix++ ) {
				wTX[ ix->first ] += beta[j] * ix->second;
				//cout << "i=" << ix->first << ",j=" << j << ",x=" << ix->second << endl;
	    		}
		}
    	}
    
    	Vector wTXprev = wTX;
    	bool converge = false;
    	unsigned k;

    	for( k=0; !converge; k++ ) 					//---iterations loop--
    	{
        	vector<double> wPrev = beta;
        	//double dot_product_total_change = 0.0;
        	ndropped = 0;

		// ----- one Newton step for the shape parameter m_alpha, using the
		// analytic first (tmpNum) and second (tmpDen) derivative sums of the
		// log-likelihood w.r.t. alpha for the current model family -----
		double tmpNum=0, tmpDen=0;
		if (modelIndex == 2) {		//Weibull
			tmpNum = -d/m_alpha;
			tmpDen = d/(m_alpha*m_alpha);
			for (int i=0; i < NUM_ROW; i++) {
				double tmp1 = -censorInd[i]*log(lifeTime[i])+pow(lifeTime[i], m_alpha)*exp(-wTX[i])*log(lifeTime[i]);
				tmpNum += -censorInd[i]*log(lifeTime[i])+pow(lifeTime[i], m_alpha)*exp(-wTX[i])*log(lifeTime[i]);

				double tmp2 = pow(lifeTime[i], m_alpha)*log(lifeTime[i])*log(lifeTime[i])*exp(-wTX[i]);
				tmpDen += pow(lifeTime[i], m_alpha)*log(lifeTime[i])*log(lifeTime[i])*exp(-wTX[i]);

				// diagnostic only: report terms that go NaN
				if (isnan(tmp1)) {
					cout << "For i=" << i << ",censorInd=" << censorInd[i] << ",lifetime=" << lifeTime[i] << ",wTX=" << wTX[i] << endl;
				}
			}	
		}
		else if (modelIndex == 3) {	//log-logistic
			tmpNum = -d/m_alpha;
			tmpDen = d/(m_alpha*m_alpha);
			for (int i=0; i < NUM_ROW; i++) {
				double logTime = log(lifeTime[i]);
				double y = pow(lifeTime[i], m_alpha);
				double lambda = exp(-wTX[i]);
				tmpNum += -censorInd[i]*logTime + (1+censorInd[i])*lambda*y*logTime/(1+lambda*y);
				tmpDen += (1+censorInd[i])*lambda*y*logTime*logTime/((1+lambda*y)*(1+lambda*y));
			}	
		}
		else if (modelIndex == 4) {	//log-normal
			tmpNum = d/m_alpha;
			tmpDen = -d/(m_alpha*m_alpha);
			for (int i=0; i < NUM_ROW; i++) {
				double z = (log(lifeTime[i]) - wTX[i])/m_alpha;
				// skip terms in the extreme tail (phi(z)~1) - note this uses a
				// 0.9999 cutoff while OneCoordStep uses an exact !=1 test
				if (phi(z) < 0.9999) {
					double h = pdfNorm(z)/(1-phi(z));
					tmpNum += -censorInd[i]*z*z/m_alpha - (1-censorInd[i])*z*h/m_alpha;
					tmpDen += 3*censorInd[i]*z*z/(m_alpha*m_alpha)
						- (1-censorInd[i])*z*h*(z*z-2)/(m_alpha*m_alpha)
						+ (1-censorInd[i])*z*z*h*h/(m_alpha*m_alpha);
				}
			}
		}
		else if (modelIndex == 5) {	//Gamma
			int ifault;
			tmpNum = d/tgamma(m_alpha)*dgamma(m_alpha);
			tmpDen = -d/(tgamma(m_alpha)*tgamma(m_alpha))*dgamma(m_alpha)*dgamma(m_alpha)+d/tgamma(m_alpha)*ddgamma(m_alpha);
			for (int i=0; i < NUM_ROW; i++) {
				double tmpArg =  lifeTime[i]*exp(-wTX[i]);
				double tmpIG = 1-gammad(tmpArg, m_alpha, &ifault); 
				tmpNum += censorInd[i]*wTX[i] - censorInd[i]*log(lifeTime[i])+ (1-censorInd[i])*dIG(m_alpha, tmpArg, 1)/tmpIG;
				tmpDen += (1-censorInd[i])*ddIG(m_alpha, tmpArg, 1)/tmpIG
					+(1-censorInd[i])*dIG(m_alpha, tmpArg,1)*dIG(m_alpha, tmpArg,1)/(tmpIG*tmpIG);
			}	

		}
		if (modelIndex == 1) 
			m_alpha = 1;		//a dummy
		else {
			// guard: if the Newton step would drive alpha non-positive,
			// halve alpha instead of taking the step
			double step = tmpNum/tmpDen;
			if ( m_alpha <= step ) { 
				m_alpha = 0.5 * m_alpha;
			}
			else {
				m_alpha = m_alpha - step;
			}
			
			//if (alpha < 0.105 || alpha > 1.05 ) {
			//	alpha = 0.105;
			//}	
			//m_alpha = 0.662;
			cout << "tmpNum=" << tmpNum << ", tmpDen=" << tmpDen << ", alpha=" << m_alpha << endl;
		}

		// ----- one pass of coordinate updates over all design variables -----
        	for( unsigned j=0; j < nDesignVars; j++ ) 				//--design vars--
        	{
	    		double dw, dw_numerator, dw_denominator;
	    		const InvData::pair_range_type range = invData.getRange(j);	//get loop of subjects for j

			// JQC: just to verify, when m_penalty is set to 0, the result matches survreg() in R
			//m_penalty(j, 0) = 0;	
            		if( Normal==bayesParam.getPriorType() ) {
				// prior contribution to the step (quadratic penalty terms)
                		dw_numerator = (beta[j]==m_priorMode(j,0)) ? 0.0  //in case of infinite penalty // ver 2.57
                    			: - 2 * ( beta[j] - m_priorMode(j,0) ) * m_penalty(j,0);
		
                		dw_denominator = 2 * m_penalty(j,0);
				//JQC: compute tentative step (eq 18 GLM) for the jth param
                		pair<double,double> numer_denom = OneCoordStep(
					range, wTX, trustregion[j],    
					lifeTime, censorInd, modelIndex, m_alpha);
				dw_numerator += numer_denom.first;
				dw_denominator += numer_denom.second;

    				//update w
    				dw = dw_numerator / dw_denominator;
    				// clamp the step to the trust region
    				dw = min( max(dw,-trustregion[j]), trustregion[j] );
	    		}
            		else { //Laplace
				if( beta[j]!=m_priorMode(j,0) || wakeUpPlan( sleep[j] ) ){
                    			pair<double,double> numer_denom = OneCoordStep(
						range, wTX, trustregion[j],  
						lifeTime, censorInd, modelIndex, m_alpha);
                    			n1coordsteps++;

					// L1 penalty is directional: subtract lambda when above the
					// mode, add it when below
                    			if( beta[j]-m_priorMode(j,0)>0 ) {
                        			dw = (numer_denom.first - m_penalty(j,0)) / numer_denom.second;
                    			}
                    			else if( beta[j]-m_priorMode(j,0)<0 ) {
                        			dw = (numer_denom.first + m_penalty(j,0)) / numer_denom.second;
                    			}
                    			else // at mean right now
                    			{ // try both directions
                        			dw = 0;
                        			if( m_priorSkew(j,0) > -1 ) { //positive allowed
                            				double dwPlus = (numer_denom.first - m_penalty(j,0)) / numer_denom.second;
                            				if( dwPlus>0 )
                                				dw = dwPlus;
                        			}
                        			if( dw==0 ) //positive step not allowed or unsuccessful
                        			{
                            				if( m_priorSkew(j,0) < 1 ) { //negative allowed
                                				double dwMinus = (numer_denom.first + m_penalty(j,0)) / numer_denom.second;
                                				if( dwMinus<0 )
                                    					dw = dwMinus; //othw dw stays at 0
                            				}
                        			}
                    			}
				}
				else {
					// NOTE(review): if this branch ever executed, dw would be
					// used uninitialized below - see the sleep[j] note above
					std::cout << "SLEEP" << wakeUpPlan(sleep[j]) << nslept << "\n";
                    			nslept++;//Log(9)<<"\nNo Wake-up j/sleep[j] "<<j<<"/"<<sleep[j]
				}	

				if (modelIndex != 4)		//don't do this for log-normal yet ! 2/6/2011 
                			dw = min( max(dw,-trustregion[j]), trustregion[j] ); //for Normal prior this is done in OneCoordStep
		
                		//check if we crossed the break point
                		if(( beta[j] < m_priorMode(j,0) && m_priorMode(j,0) < beta[j]+dw )
		   			|| ( beta[j]+dw < m_priorMode(j,0) && m_priorMode(j,0) < beta[j] ))
                    			dw = m_priorMode(j,0) - beta[j]; //stay at mean

            		} //Laplace
	  
            		beta[j] += dw;
            		if( beta[j]==m_priorMode(j,0) )   ndropped++;
            		//update local data
			// propagate the step into the linear predictor for affected cases
			if (dw != 0) {
	    			for( InvData::pair_vector_type::const_iterator ix=range.first; ix!=range.second; ix++ )
            			{
                			unsigned i = ix->first;
                			double x = ix->second;
                			//if( 0.0==x ) continue; //just in case - shouldn't ever hit this
					//double old = wTX[i];
                			wTX[i] += dw * x;
					//dot_product_total_change += fabs(dw * x); 
            			}
			}

            		//trust region update
			// grow toward 2|dw| after a big step, otherwise halve
	    		trustregion[j] = max( 2*fabs(dw), trustregion[j]/2 );
        	}//--design vars-- j --

		// ----- ZO convergence test: relative L1 change of the linear predictor -----
        	double sum_abs_r = 0;
        	double sum_abs_dr = 0.0;
        	for( unsigned i=0; i< nRows; i++ ) {
            		sum_abs_r += fabs(wTX[i]);
        		//ZO stopping
            		sum_abs_dr += fabs(wTX[i]-wTXprev[i]);
		}	
        	//double dot_product_rel_change = dot_product_total_change/(1+sum_abs_r);	//DL stopping
        	double rel_sum_abs_dr = sum_abs_dr/(1+sum_abs_r);			//ZO stopping

        	converge =( k >= iterLimit  || rel_sum_abs_dr<thrConverge );		//ZO stopping
		//DL stopping || dot_product_rel_change<thrConverge );

        	Log(5)<<"\nZO iteration "<<k+1
	      		<<"  Dot product abs sum "<<sum_abs_r
	      		<<"  Rel change "<<rel_sum_abs_dr //ZO stopping //DL stopping dot_product_rel_change
	      		<<"  Beta relative increment = "<<normdiff(beta,wPrev)/norm(wPrev)
	      		<<"  Beta components dropped: "<<ndropped
	      		<<"  Time "<<Log.time()<<endl;
       		 wTXprev = wTX;
    	}//---iter loop---
    
	time_t t4 = time(NULL);
	cout << "spent " << t4-t3 << " in the loop..." << endl;

    	Log(3)<<std::endl<<"1-coord steps: "<<n1coordsteps<<"  Double tries: "<<ndoubltries<<" Slept: "<<nslept;
    	Log(3)<<std::endl<<( k>=iterLimit ? "Stopped by iterations limit"  : "Stopped by original ZO rule" );
    	Log(3)<<std::endl<<"Built ZO model "<<k<<" iterations, Time "<<Log.time() << endl; 
    	Log(0).flush();

	time_t t11 = time(NULL);
 	cout << "for Laplace prior: spent " << elapsedTime << " seconds " << endl;
 	cout << "end of ZOLRBinary(): spent " << t11 -t1 << " seconds " << endl;

	//cout << "Final ... alpha = " << m_alpha << endl;
    	//for( unsigned j = 0; j < nDesignVars; j++ ) {
	//	cout << "beta[" << j << "]=" << beta[j] << endl;
	//}	

	//cout << "calling LogLikelihood() ...." << endl;
        //double eval = LogLikelihood(rowSet, beta, m_alpha, modelIndex, lifeTime, censorInd);
	//cout << "==>Model=" << modelIndex << ": log likelihood is " << eval << endl;
}

// Builds a cumulative (prefix-sum) index of feature occurrence counts across
// all rows: after the scan, wordIndex holds running totals of nonzero design
// entries per feature (offset by one slot). Used to size/address the inverted
// data structure (see InvData construction in hyperParamLoop).
vector<int> LRModel::buildWordIndexForSample(RowSetIterator &drs, unsigned dim) const {
	drs.rewind();
	
	// slot 1+f accumulates the count for feature f; slot 0 stays 0 before the scan
	vector<int> wordIndex = vector<int>(dim+1, 0);
	// NOTE(review): `v` is bound once to drs.xsparse() before the first next();
	// this assumes xsparse() returns a reference that tracks the iterator's
	// current row as next() advances - confirm against RowSetIterator.
	for (const SparseVector& v = drs.xsparse(); drs.next(); ) {
		for (SparseVector::const_iterator i = v.begin(); i < v.end(); i++) {
			wordIndex[1 + i->first]++;
		}
	}

	// convert per-feature counts into a running cumulative sum
	int runningsum = 0;
	for (vector<int>::iterator i = wordIndex.begin(); i < wordIndex.end(); i++) {
		runningsum += *i;
		*i = runningsum;
	}

	// leave the iterator rewound for the caller
	drs.rewind();

	return wordIndex;
}

/* Perform loop of hyperparameter values for a given train/validation split */
// For each hyperparameter value in the plan: fit on the training split and
// evaluate the penalized log-likelihood on the validation split.
//   drs/drsTest: training and validation row sets
//   lifeTime*/censorInd*: survival response for each split
//   modelIndex: survival model family (see ZOLRBinary)
// Returns a heap-allocated vector of per-plan-point log-likelihoods; the
// caller owns and deletes it.
// Throws runtime_error if the optimizer is not ZO (no inverted data).
vector<double>* LRModel::hyperParamLoop( MemRowSetIterator& drs,
		 unsigned dim, 
		 unsigned classes,
		 MemRowSetIterator& drsTest,
		 ModelType modelType,
		 const HyperParamPlan& hpPlan,
		 const class FixedParams& fixedParams,
		 vector<double> lifeTimeTrain, vector<double> lifeTimeTest,
		 vector<int> censorIndTrain, vector<int> censorIndTest, int modelIndex
    )

{  
    vector<double>* lglkli = new vector<double>();
	
    unsigned planSize = hpPlan.getPlan().size();
    ParamMatrix localbeta( dim, classes );
    
    InvData* p_invData = 0;
    if (modelType.getOptimizerType() == ModelType::ZO) {
	vector<int> wordIndex = buildWordIndexForSample(drs, dim);
	p_invData = new InvData(drs, classes, dim, wordIndex);
    }
    // BUG FIX: ZOLRBinary below unconditionally dereferences p_invData, which
    // is only allocated for the ZO optimizer. Fail loudly instead of crashing.
    if (p_invData == 0) {
	delete lglkli;
	throw runtime_error("hyperParamLoop: ZO optimizer required - inverted data not built");
    }
    
    cout << "planSize = " << planSize << endl;

    for( unsigned iparam=0; iparam<planSize; iparam++ )
    {
        Log(5)<<"\nHyperparameter plan #"<<iparam+1<<" value="<<(hpPlan.getPlan()[iparam]);
        const BayesParameter& localBayesParam = hpPlan.getInstBayesParam( iparam );
	
        // build the model on the training split
	MemRowSetIterator drsIterator = MemRowSetIterator(drs);
	ZOLRBinary(drsIterator, *p_invData, localBayesParam, 
		const_cast<vector<bool>& >(fixedParams.classparam(0) ),
	 	const_cast<vector<double>& >(localbeta.classparam(0)),
		lifeTimeTrain, censorIndTrain, modelIndex );

	cout << "after ZOLBBinary()... " << endl;

        //double eval = LogLikelihood( drsTest, classes, modelType, localbeta );
	// TEMP: this doesnot work, so passing in test set for lifetime and censorInd directly 
	//MemRowSet designX = drsTest.getRowSet();
	//vector<double> lifeTime = designX.getLifeTime();

        // score the fitted coefficients on the validation split
        double eval = LogLikelihoodPenalty( 
		drsTest, localbeta.classparam(0), localBayesParam, m_penalty, m_alpha, modelIndex, lifeTimeTest, censorIndTest );
	cout << " loglike from test set is " << eval << endl;
        lglkli->push_back(eval);
	cout << "end of loop.... after push_back " << eval << endl;
    }
    
    delete p_invData;
    return lglkli;
}


// Sum over features of E[x_j^2] = mean_j^2 + stddev_j^2, excluding the
// constant term at index 0 (separated from class Stats for that reason).
// Used to auto-set the prior hyperparameter in tuneModel.
double LRModel::AvgSquNorm(RowSetStats& stats) const { //separated from class Stats' to exclude constant term
    double s = 0.0;
    // start at j=1: index 0 is the constant term (was a continue-guard before)
    for(unsigned j=1; j<stats.Means().size(); j++) {
        s += stats.Means()[j]*stats.Means()[j] + stats.Stddevs()[j]*stats.Stddevs()[j];
	Log(16)<<"AvgSquNorm: "<<stats.Means()[j]<<";"<<stats.Stddevs()[j]<<endl;
    }
    return s;
}

// Model selection / final fitting. Either fits directly with a fixed or
// auto-set hyperparameter (no grid), or runs n-fold cross-validation over the
// hyperparameter plan, picks the best value, and refits on all data.
// When modelIndex == -1, loops over model families 1..4 (gamma=5 excluded).
// Sets m_bayesParam and (via ZOLRBinary) m_beta / m_alpha / m_penalty.
void LRModel::tuneModel(   MemRowSetIterator & drs,
			   const vector<int>& rowIndex,
			   const HyperParamPlan& hyperParamPlan,
			   const class FixedParams& fixedParams,
			   RowSetStats& stats,
			   map<int,int>& origFeatureIndex
	       	)

{
	int modelIndex = m_modelType.getModelIndex();
	// NOTE: if modelIndex==-1 these unsigned inits briefly wrap, but both
	// are overwritten in the branch below before use
	unsigned startIndex = modelIndex;
	unsigned endIndex = modelIndex;
	if (modelIndex == -1) {		//option to try all models 
		startIndex = 1;
		endIndex = 4;
	}
	cout << "got modelindex=" << modelIndex << endl;

    // build the model
    if( ! hyperParamPlan.hasGrid() ) {    
	cout << " does not have grid.. " << endl;
        BayesParameter localBayesParam;
        if( hyperParamPlan.getAutoHPar() ) {
		cout << "getAutoHPar() is yes" << endl;
	    // auto hyperparameter from the average squared feature norm
	    double avgsqunorm = AvgSquNorm(stats)/(double)drs.dim();    // v3.21
            double hpar = Normal==hyperParamPlan.getPriorType() ? 
		1.0/avgsqunorm  : sqrt(avgsqunorm*2.0);
            localBayesParam = BayesParameter( hyperParamPlan.getPriorType(), hpar, hyperParamPlan.getSkew() );

            Log(3)<<"\nAverage square norm (no const term) "<<avgsqunorm<<" Hyperparameter "<<hpar<<endl;
        }
        else if( hyperParamPlan.isFixed() ) {
		cout << "hyperParamPlan.isFixed" << endl;
            localBayesParam = hyperParamPlan.getBp();
	}
        else throw logic_error("Inconsistent 'HyperParamPlan' setting");

	MemRowSetIterator& drsIterator = drs;
       	InvData invData(drsIterator, drs.c(), drs.dim(), rowIndex );

	// fit each requested model family directly on all data
	for (int tmpIndex = startIndex; tmpIndex <= endIndex; tmpIndex++) {
		ZOLRBinary(drsIterator, invData, localBayesParam,
 		    const_cast<vector<bool>& >(fixedParams.classparam(0) ),
	 	    const_cast<vector<double>& >(m_beta.classparam(0)), tmpIndex);
	}

        m_bayesParam = localBayesParam;
    }
    else {    //model averaging/selection
	cout << " begin cross validation... " << endl;

        unsigned planSize = hyperParamPlan.getPlan().size();
        vector<double> lglkliMean( planSize, 0.0 );
        vector<double> lglkliMeanSqu( planSize, 0.0 );
        const double randmax = RAND_MAX;

        const std::vector<double>& hpplan = hyperParamPlan.getPlan();

        //prepare CV
        unsigned nfolds = hyperParamPlan.getNfolds(); //10
        if( nfolds>drs.n() ) {
            nfolds = drs.n();
            Log(1)<<"\nWARNING: more folds requested than there are data. Reduced to "<<nfolds;
        }
        unsigned nruns = hyperParamPlan.getNruns(); //2
	cout << "nfolds=" << nfolds << ", drs.n()=" << drs.n() << ", nruns=" << nruns << endl;
        if( nruns>nfolds )  nruns = nfolds;

	RowSetSampler sampler = RowSetSampler(drs, nfolds);
	cout << "calling sampling.printIndex()..." << endl;
	sampler.printIndex();
        vector<bool> foldsAlreadyRun( nfolds, false );

	//This loops through all the models: eg exponential, weibull, log-logistic, lognormal 
	// NOTE(review): lglkliMean/lglkliMeanSqu and foldsAlreadyRun are NOT
	// reset between tmpIndex iterations, so when startIndex<endIndex the
	// model families share accumulators and folds - confirm this is intended.
	for (int tmpIndex = startIndex; tmpIndex <= endIndex; tmpIndex++) {
        	//cv loop
        	for( unsigned irun=0; irun < nruns; irun++ )
        	{
            		//next fold to run: pick the x-th not-yet-run fold at random
            		// NOTE(review): rand()*(nfolds-irun) is an int product and can
            		// overflow before the division - consider rand()/randmax first.
            		unsigned x = unsigned( rand()*(nfolds-irun)/randmax );
            		unsigned ifold, y=0;
            		for( ifold=0; ifold<nfolds; ifold++ )
                	if( !foldsAlreadyRun[ifold] ) {
                    		if( y==x ) break;
                    		else y++;
                	}
            		foldsAlreadyRun[ifold] = true;
	    		Log(5) << "\n-----------------------------------------";
            		Log(5)<<"\nCross-validation "<<nfolds<<"-fold; Run "<<irun+1<<" out of "<<nruns<<", fold "<<ifold << endl;
	    
            		//training
	    		MemRowSetIterator* cvtrain = sampler.getSample(ifold, false);
            		MemRowSetIterator* cvtest = sampler.getSample(ifold, true); 
	   	 	vector<double> lifeTimeTrain, lifeTimeTest;
	    		vector<int> censorIndTrain, censorIndTest; 
	   	 	sampler.getSampleSurvivalVars(lifeTimeTrain, lifeTimeTest, censorIndTrain, censorIndTest, ifold);
	    		cout << "size of train=" << lifeTimeTrain.size() << ", size of testing=" << lifeTimeTest.size() << endl;

            		vector<double>* loglikelihoods = hyperParamLoop( 
				*cvtrain, drs.dim(), drs.c(), *cvtest, //invData,
				m_modelType, hyperParamPlan, //bayesParameter, hpplan, 
		     		fixedParams, lifeTimeTrain, lifeTimeTest, censorIndTrain, censorIndTest, tmpIndex);
            		Log(5)<<"\nCross-validation test log-likelihood: ";
            		for( unsigned iparam=0; iparam<planSize; iparam++ ) {
                		double eval = (*loglikelihoods)[iparam];
                		Log(5)<<eval<<" ";
                		//arith ave of loglikeli, i.e. geometric mean of likelihoods
                		// incremental running mean / mean-of-squares over runs
                		lglkliMean[iparam] = lglkliMean[iparam]*irun/(irun+1) + eval/(irun+1);
                		lglkliMeanSqu[iparam] = lglkliMeanSqu[iparam]*irun/(irun+1) + eval*eval/(irun+1);
            		}
	    
	    		delete loglikelihoods;
	    		delete cvtrain;
	    		delete cvtest;
        	}//cv loop
	
        	// standard error of the cv mean per plan point
        	vector<double> lglkliStdErr; //( hpplan.size(), 0.0 );
        	for(unsigned i=0; i<planSize; i++ )
            		lglkliStdErr.push_back( sqrt( lglkliMeanSqu[i] - lglkliMean[i]*lglkliMean[i] )//stddev
                    		/ sqrt(double(nruns)) ); 
	
        	// best by cv
        	double bestEval = - numeric_limits<double>::max();
        	unsigned bestParam = unsigned(-1);
        	Log(5)<<"\nCross-validation results - hyperparameter values, "
	      		<<(Laplace==hyperParamPlan.getPriorType()?"prior var, ":"")
	      		<<"cv mean loglikelihood, std.error:";
        	for( unsigned i=0; i<planSize; i++ ) {
            		if( lglkliMean[i]>bestEval ) {
                		bestEval = lglkliMean[i];
                		bestParam = i;
            		}
            		Log(5)<<"\n\t"<<hpplan[i];
            		if(Laplace==hyperParamPlan.getPriorType()) 
                		Log(5)<<"\t"<<hyperpar2var(hyperParamPlan.getPriorType(),hyperParamPlan.getSkew(),hpplan[i]);
            		Log(5)<<"\t"<<lglkliMean[i]<<"\t"<<lglkliStdErr[i];
        	}
        	if( bestParam==unsigned(-1) )
            		throw runtime_error("No good hyperparameter value found");
        	// one-standard-error rule: take the first (simplest) plan point
        	// within one std.err of the best
        	if( hyperParamPlan.isErrBarRule() ) {
            		double valueneeded = lglkliMean[bestParam] - lglkliStdErr[bestParam];
            		unsigned oldBestParam = bestParam;
            		for( unsigned i=0; i<planSize; i++ )
                		if( lglkliMean[i]>=valueneeded ) {
                    			bestParam = i;
                    			break;
                		}
            			// NOTE: indentation is misleading - this Log runs once,
            			// after the for loop above (the for body is the if alone)
            			Log(5)<<"\nError bar rule selected hyperparameter value "<<hpplan[bestParam]<<" instead of "<<hpplan[oldBestParam];
        	}
        	Log(5)<<"\nBest hyperparameter value "<<hpplan[bestParam]<<" cv-average loglikely "<<bestEval;

        	//build final model on all data with the selected hyperparameter
        	m_bayesParam = hyperParamPlan.getInstBayesParam( bestParam );

        	Log(5)<<std::endl<<"Starting final model after cv, Time "<<Log.time();
		MemRowSetIterator drsIterator = MemRowSetIterator(drs);
      		InvData invData(drsIterator, drs.c(), drs.dim(), rowIndex );
		//ZOLRBinary(drsIterator, invData, m_bayesParam, 
 		    //const_cast<vector<bool>& >(fixedParams.classparam(0) ),
	 	    //beta, tmpIndex );

		ZOLRBinary(drsIterator, invData, m_bayesParam,
 		    const_cast<vector<bool>& >(fixedParams.classparam(0) ),
	 	    const_cast<vector<double>& >(m_beta.classparam(0)), tmpIndex);

		vector<double> beta = const_cast<vector<double>&>(m_beta.classparam(0));
		cout << "calculating AIC/BIC ...." << endl;
		MemRowSet designX = drsIterator.getRowSet();
		vector<double> lifeTime = designX.getLifeTime();
		vector<int> censorInd = designX.getCensorInd();

		// k = number of distributional parameters (exponential has no shape)
        	double eval = LogLikelihood(drsIterator, beta, m_alpha, tmpIndex, lifeTime, censorInd);
		int k=2;
		if (tmpIndex==1)	k=1;
		cout << "After CV: Model=" << tmpIndex << ": log likelihood=" << eval << ", AIC=" << -2*eval+2*k << ", BIC=" << -2*eval+k*log(lifeTime.size()) << endl;

    		// report model
    		Log(3)<<std::endl<<"Beta:***";
    		m_beta.displayModel( Log(3), designX, origFeatureIndex );
	}
    }//model averaging/selection
}

// Top-level training entry point: sets up parameter matrices and priors for
// a single-class (Cox/survival) model, then delegates fitting and model
// selection to tuneModel(), which sets m_beta and m_bayesParam.
// (modelFile and heldout are currently unused here but kept for the interface.)
void LRModel::train(MemRowSet& trainData, WriteModel& modelFile, IRowSet* heldout, map<int,int>& origFeatureIndex)
{
	cout << "in LRModel::train() ..." << endl;
    	m_lastDiff = vector<double>(trainData.n(), 0);

    	const BayesParameter& bayesParameter = m_hyperParamPlan.getBp();
    	m_bTrained = false;
   
    	MemRowSet& drs = trainData;

    	//int numClasses = drs.c();				//JQC - for BBR only
    	int numClasses = 1;					//JQC - for Cox regression
    	cout << "numClasses is " << numClasses << ".. dim of data is " << drs.dim() << endl;
    	// one column per class: coefficients, prior modes/skews, penalties
    	m_beta = ParamMatrix( drs.dim(), numClasses );
    	m_priorMode = ParamMatrix( drs.dim(), numClasses);
    	m_priorSkew = ParamMatrix( drs.dim(), numClasses, m_hyperParamPlan.getSkew() );
    	m_penalty = ParamMatrix( drs.dim(), numClasses);
    	//FixedParams - what are they ?? - JQC
    	const vector<vector<bool> > dummy; //(drs.dim(),vector<bool>(drs.c(),false));
    	int classid = m_metadata.getRefClassId();
    	FixedParams fixedParams( m_modelType.isAllZeroClassMode() ? trainData.allzeroes() : dummy,
	     m_individualPriorsHolder,
	     classid, drs.dim(), numClasses );
    	Log(3)<<"FIXEDPARAMS "<<fixedParams<<endl;
    	Log(3)<<"\nTotal parameters: "<<drs.dim()*numClasses<<"\t Of them fixed: "
	  	<<fixedParams.count()<<endl;

    	// m_beta initialization, read in the init file
    	InitModel initfilemodel(m_initFName, m_metadata, bayesParameter, 
		fixedParams, *m_individualPriorsHolder, m_beta);

    	// BUG FIX: was `new MemRowSetIterator(drs)` with no matching delete
    	// (leak); a stack object suffices since tuneModel takes a reference.
    	MemRowSetIterator workingRs(drs);
    	RowSetStats stats(drs);
    	// NOTE(review): `it` is only created and deleted - appears unused;
    	// kept in case iterator() has side effects - confirm and remove.
    	RowSetIterator* it = drs.iterator();

    	//this sets m_beta and m_bayesParam
    	tuneModel( workingRs, //bayesParameter, 
	       trainData.getWordIndex(),
	       m_hyperParamPlan,
	       fixedParams, 
	       stats, 
	       origFeatureIndex );
    
    	//JQC - not sure how the number is tracked ...
    	//Beta components dropped finally
    	//unsigned bdropped = m_priorMode.countequal( m_beta );
    	//Log(3)<<"\nBeta components dropped finally: "<<bdropped
	 // 	<<" Left: "<< m_beta.numClasses()*m_beta.numFeatures()-bdropped << endl;

    	//vector< vector<double> > resubstScore( drs.n(), vector<double>(numClasses,0.0) );
    
    	// report model
    	//Log(3)<<std::endl<<"Beta:***";
    	//m_beta.displayModel( Log(3), drs, origFeatureIndex );

    	delete it;
}


