#define  _USE_MATH_DEFINES
#include <math.h>
#include <algorithm>
#include <limits>
#include <iomanip>
#include <omp.h>
#include <time.h>

#include "logging.h"
#include "Compiler.h"
#include "Matrix.h"
#include "BayesParamManager.h"
#include "IndividualPriors.h"
#include "Design.h"
#include "PolyZO.h"
#include "ModelTypeParam.h"
#include "RowSetStats.h"
#include "ModelFile.h"
#include "Likelihood.h"
#include "OutputFormat.h"
#include "Data.h"
#include "asa239.h"

#if defined(USE_GCC)
	#define _isnan isnan
#endif

using namespace std;
int NUM_ROW;
int NUM_COL;
double elapsedTime = 0;

static bool singleVar = true;	

// Standard normal probability density at x: (1/sqrt(2*pi)) * exp(-x^2/2).
// Uses M_PI (made available by the _USE_MATH_DEFINES / math.h pair at the
// top of this file) instead of the truncated literal 3.1415926, which cost
// about eight significant digits of accuracy in the normalizing constant.
double pdfNorm(double x) 
{
	return 1/sqrt(2.0*M_PI) * exp(-0.5*x*x);
}

// Standard normal CDF via the Abramowitz & Stegun formula 7.1.26 erf
// approximation (max absolute error ~1.5e-7).  The result is capped just
// below 1.0 so downstream 1/(1-phi(x)) hazard terms never divide by zero.
double phi(double x)
{
	// A&S 7.1.26 polynomial coefficients
	const double a1 =  0.254829592;
	const double a2 = -0.284496736;
	const double a3 =  1.421413741;
	const double a4 = -1.453152027;
	const double a5 =  1.061405429;
	const double p  =  0.3275911;

	// work with |x|/sqrt(2), remembering the original sign
	const int sign = (x < 0) ? -1 : 1;
	x = fabs(x)/sqrt(2.0);

	// A&S formula 7.1.26
	const double t = 1.0/(1.0 + p*x);
	const double y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x);

	double result = 0.5*(1.0 + sign*y);
	if (result == 1.0)
		result = 0.99999999;   // keep 1 - phi(x) strictly positive
	return result;
}

void testPhi()
{
	// Select a few input values
	double x[] = 
	{
		-3, 
		-1, 
		0.0, 
		0.5, 
		2.1 
	};

	// Output computed by Mathematica
	// y = Phi[x]
	double y[] = 
	{ 
		0.00134989803163, 
		0.158655253931, 
		0.5, 
		0.691462461274, 
		0.982135579437 
	};

	int numTests = sizeof(x)/sizeof(double);

	double maxError = 0.0;
	for (int i = 0; i < numTests; ++i)
	{
		double error = fabs(y[i] - phi(x[i]));
		if (error > maxError)
			maxError = error;

		double h = pdfNorm(x[i])/(1-phi(x[i]));
		cout << "x=" << x[i] << ", h=" << h << endl;
	}

	std::cout << "Maximum error: " << maxError << "\n";
} 

// Partition a[left..right] around a pivot, applying the identical swaps to
// ind[] so it tracks the permutation.  Returns the pivot's final INDEX
// (as a double, preserving the old signature) so the caller can recurse
// on [left, p-1] and [p+1, right].
//
// The previous implementation had two defects: it returned the pivot
// VALUE (a[right]) while quicksort() below used the result as an index,
// and its Hoare-style loop could spin forever on duplicate keys because
// swapping two equal elements never advanced left/right.
static double partition(std::vector<double>& a, std::vector<int>& ind, int left, int right)
{
	double pivot = a[right];            // Lomuto scheme: pivot at the right end
	int store = left;                   // next slot for an element < pivot
	for (int i = left; i < right; i++)
	{
		if (a[i] < pivot)
		{
			std::swap(a[i], a[store]);
			std::swap(ind[i], ind[store]);
			store++;
		}
	}
	std::swap(a[store], a[right]);      // move the pivot to its final place
	std::swap(ind[store], ind[right]);
	return store;                       // an index, usable by quicksort()
}

// Recursive quicksort of a[left..right], permuting ind[] in parallel.
// NOTE(review): partition() above returns a double that is used here as a
// split index; verify against partition's return statement that the value
// returned really is an index before relying on this sort (the similarly
// named quickSort() below is the variant used with an embedded partition).
static void quicksort(vector<double>& a, vector<int>& ind, int left, int right)
{
	if (left < right)
	{
		// 'pivot' is treated as the split position: the ranges to its
		// left and right are sorted recursively.
		double pivot = partition(a, ind, left, right);
		quicksort(a, ind, left, pivot-1);
		quicksort(a, ind, pivot+1, right);
	}
}

// In-place quicksort of arr[left..right]; ind[] receives exactly the same
// sequence of swaps so it records the permutation applied to arr.
// Uses a middle-element pivot with a Hoare-style two-cursor partition.
static void quickSort(std::vector<double>& arr, std::vector<int>& ind, int left, int right) 
{
	int lo = left;
	int hi = right;
	const double pivot = arr[(left + right) / 2];

	/* partition: advance each cursor past elements already on the correct
	   side of the pivot, then swap the mismatched pair */
	while (lo <= hi)
	{
		while (arr[lo] < pivot)
			lo++;
		while (arr[hi] > pivot)
			hi--;
		if (lo <= hi)
		{
			std::swap(arr[lo], arr[hi]);
			std::swap(ind[lo], ind[hi]);
			lo++;
			hi--;
		}
	}

	/* recurse into whichever halves are non-trivial */
	if (left < hi)
		quickSort(arr, ind, left, hi);
	if (lo < right)
		quickSort(arr, ind, lo, right);
}

// Loads a model file of comma-separated "index,value" lines: the first
// parsed line supplies alpha, the remaining lines supply the beta
// coefficients in model order.  The betas are then permuted using the
// iteration order of reverseFeatIndex (betaCopy[rev->second] = beta[k]).
//
// modFileName      - path to the model file
// beta             - (out) coefficients, reordered via reverseFeatIndex
// alpha            - (out) value from the first parsed line
// origFeatureIndex - not used by this function; kept for interface
//                    compatibility with existing callers
// reverseFeatIndex - map whose iteration order defines the permutation
//
// Throws runtime_error if the file cannot be opened (previously a NULL
// FILE* was passed straight to fgets, crashing on a missing file).
void ReadBetaFromFile(std::string modFileName, std::vector<double>& beta, double& alpha, std::map<int,int>& origFeatureIndex, std::map<int,int>& reverseFeatIndex){
	FILE* fp = fopen(modFileName.c_str(),"r");
	if(fp == NULL){
		throw std::runtime_error("ReadBetaFromFile: cannot open " + modFileName);
	}
	char line[1024];
	int i;
	double d;
	beta.clear();
	int count = 0;
	while(fgets(line,1024,fp)){
		// skip lines that do not parse as "<int>,<double>"; previously the
		// sscanf result was ignored and stale i/d values could be reused
		if(sscanf(line,"%d,%lf",&i,&d) != 2)
			continue;
		if(count == 0){
			alpha = d;		// first record holds the alpha parameter
		}
		else{
			beta.push_back(d);
		}
		count++;
	}
	fclose(fp);
	// Permute back: the k-th coefficient read belongs at the position given
	// by the k-th entry of reverseFeatIndex (its ->second field).
	std::vector<double> betaCopy = beta;
	std::map<int,int>::iterator itRev = reverseFeatIndex.begin();
	count = 0;
	for(; itRev != reverseFeatIndex.end(); itRev++){
		betaCopy[itRev->second] = beta[count];
		count++;
	}
	beta = betaCopy;
}

//test incomplete gamma function 
// Sushil
//void test01 ( )
//{
//	double aa = tgamma(0.5);
//	cout << "gamma(0.5)=" << aa << ", dgamma(0.5)=" << dgamma(0.5) << endl;
//
//	aa = tgamma(1.5);
//	cout << "gamma(1.5)=" << aa << ", dgamma(1.5)=" << dgamma(1.5) << endl;
//
//  double a;
//  double fx;
//  double fx2;
//  int ifault;
//  int n_data;
//  double x;
//
//	cout << "incomplete gamma(11.1, 9)=" << gammad(11.1, 9, &ifault) << endl;
//
//  cout << "\n";
//  cout << "TEST01:\n";
//  cout << "  GAMMAD computes the incomplete Gamma function.\n";
//  cout << "  Compare to tabulated values.\n";
//  cout << "\n";
//  cout << "             A             X      "
//       << "    FX                        FX2\n";
//  cout << "                                  "
//       << "    (Tabulated)               (GAMMAD)            DIFF\n";
//  cout << "\n";
//
//  n_data = 0;
//
//  for ( ; ; )
//  {
//    gamma_inc_values ( &n_data, &a, &x, &fx );
//
//    if ( n_data == 0 )
//    {
//      break;
//    }
//
//    fx2 = gammad ( x, a, &ifault );
//
//    cout << "  " << setprecision(4) << setw(12) << a
//         << "  " << setprecision(4) << setw(12) << x
//         << "  " << setprecision(16) << setw(24) << fx
//         << "  " << setprecision(16) << setw(24) << fx2
//         << "  " << setprecision(4) << setw(10) << fabs ( fx - fx2 ) << "\n";
//  }
//
//  return;
//}

//JQC: One (j) coordinate move for Cox regression for both priors 
// returns numer, denom pair
pair<double,double>  OneCoordStep(const InvData::pair_range_type range,	const Vector& wTX,double trustregion,
	vector<double> lifeTime,vector<int> censorInd,unsigned modelIndex,double alpha)
{  
	double num = 0;
	double den = 0;
	int iter = 0;
	for(InvData::pair_vector_type::const_iterator ix = range.first; ix != range.second; ix++)
	{
		iter++;
		unsigned i = ix->first;						//case #
		double x_ij = ix->second;					//value

		//for(int i=0; i < NUM_ROW; i++)
		//{
		double y = lifeTime[i];						//exponential, gamma

		if(modelIndex == Weibull || modelIndex == Loglogistic)
		{	
			//cout << "using new y, alpha=" << alpha << endl;
			y = pow(lifeTime[i], alpha);
		}
		else if(modelIndex == Lognormal)
		{					
			y = log(lifeTime[i]);
		}

		double exp_r_i = exp(-wTX[i]);				//exp(-beta * X_i)

		//		if(exp_r_i > 1000000.00)
		//			exp_r_i = 1000000.00;

		double exp_delta_r_i = exp(trustregion * fabs(x_ij)); 


		if(modelIndex == Exponential || modelIndex == Weibull)
		{
			num += x_ij*censorInd[i] - x_ij*y*exp_r_i;
			//den1 += x_ij * x_ij * lifeTime[i] * min(exp_r_i * exp_delta_r_i, exp_r_i/exp_delta_r_i);
			den += x_ij*x_ij*y*exp_r_i;
		}
		else if(modelIndex == Loglogistic) 
		{
			num += x_ij * censorInd[i] - (1+censorInd[i])*x_ij*exp_r_i*y/(1+y*exp_r_i);
			den += (1+censorInd[i])*y*exp_r_i* x_ij*x_ij/((1+y*exp_r_i)*(1+y*exp_r_i));
		}
		else if(modelIndex == Lognormal) 
		{
			double z = (log(lifeTime[i]) - wTX[i])/alpha;
			double h = pdfNorm(z)/(1-phi(z));

			if(phi(z) !=1)							//if equals 1, the term will not enter in the likelihood
			{
				num += - x_ij*(censorInd[i]*z+(1-censorInd[i])*h)/alpha;
				den += x_ij*x_ij*(censorInd[i]-(1-censorInd[i])*h*(z-h))/(alpha*alpha);
			}

		}
		else if(modelIndex == 5) //Gamma
		{
			int ifault;
			double tmpArg = y*exp(-wTX[i]);
			double tmpIG = 1-gammad(tmpArg, alpha, &ifault);
			num += alpha*censorInd[i]*x_ij - censorInd[i]*x_ij*tmpArg - (1-censorInd[i])*dIG(alpha, tmpArg, 2)*tmpArg*x_ij/tmpIG;
			den += censorInd[i]*x_ij*x_ij*tmpArg
				+(1-censorInd[i])*dIG(alpha, tmpArg, 2)*tmpArg*x_ij*x_ij/tmpIG
				+(1-censorInd[i])*ddIG(alpha, tmpArg, 2)*tmpArg*tmpArg*x_ij*x_ij/tmpIG
				+(1-censorInd[i])*dIG(alpha, tmpArg, 2)*dIG(alpha, tmpArg, 2)*tmpArg*tmpArg*x_ij*x_ij/(tmpIG*tmpIG);
		}
		//		cout << "iter = " << iter <<", num=" << num << ", den=" << den <<"p"<< endl;	
	}
	return pair<double,double>(-num,den);		//numerator and denominator
}

// JQC: "wakeup plan" for Laplace Prior (What was it for?)
// 	From D Lewis:
//		If a coefficient equals its prior mode, there's a good chance that that is the value it should have 
//		in the final fitted solution.  So we start skipping updates of those coefficients, 
//		and each time we find the coefficient is still equal to the mode, we increase the number of iterations 
//		we skip it. 
// 	
// Convenience overload: extracts the lifetime and censoring-indicator
// vectors from the row set, then delegates to the full ZOLRBinary
// implementation below.
void LRModel::ZOLRBinary(MemRowSetIterator& rowSet,const InvData& invData, //input - generates design data
	const BayesParameter& bayesParam, vector<bool>& fixedParams,vector<double>& beta,int modelIndex)
{
	MemRowSet designX = rowSet.getRowSet();
	ZOLRBinary(rowSet, invData, bayesParam, fixedParams, beta,
		designX.getLifeTime(), designX.getCensorInd(), modelIndex);
}

// Fits a parametric survival model (Exponential, Weibull, Loglogistic,
// Lognormal; Gamma paths are commented out) by cyclic coordinate descent
// with a trust region, under a Normal or Laplace prior on the coefficients.
// beta (out) receives the fitted coefficients; m_alpha[thread] receives the
// fitted shape parameter.  Per-thread state (m_penalty, m_priorMode,
// m_priorSkew, m_alpha) is indexed by omp_get_thread_num() so this method
// can run inside an OpenMP parallel region (see hyperParamLoop).
void LRModel::ZOLRBinary(MemRowSetIterator& rowSet,
	const InvData& invData, //input - generates design data
	const BayesParameter& bayesParam, //input
	vector<bool>& fixedParams,
	vector<double>& beta,
	vector<double> lifeTime,
	vector<int> censorInd,
	int modelIndex)
{
	// Sushil: I think initializing betas to zero is necessary, although with enough small convergence error, 
	// the solution converges to the same point even when betas are not initialized to zero (like it was earlier).
	for(int i = 0; i < (int)beta.size(); i++)
		beta[i] = 0.0;

	unsigned nDesignVars = rowSet.dim();					//JQC number of features
	//unsigned nRows = invData.n();							//number of subjects - in training  
	unsigned nRows = lifeTime.size();						//number of subjects - in training  
	if(nDesignVars == m_priorMode[omp_get_thread_num()].numFeatures()) ;//ok
	else    throw DimensionConflict(__FILE__,__LINE__);

	NUM_ROW = nRows;
	NUM_COL = nDesignVars;

	// Convert the prior variance into the penalty multiplier p:
	// Normal -> 1/(2*var); Laplace -> lambda = sqrt(2/var).
	double common_variance = bayesParam.getPriorVar(); 
	cout << " Variance = " << common_variance << endl;
	double p;
	if(Normal == bayesParam.getPriorType()) 
	{
		// analogous to 1/(2*tau_j) from Equation 8 of GLM2007
		p = 1 / (2 *common_variance);
	}
	else if(Laplace == bayesParam.getPriorType()) 
	{
		// analogous to lambda_j from Equation 9 of GLM2007
		double lambda = sqrt(2.0) / sqrt(common_variance); 
		p = lambda;
	}
	else
		throw runtime_error("ZO only allows Normal or Laplace prior");

	//convert prior scale into penalty parameter
	m_penalty[omp_get_thread_num()].reset(nDesignVars, 1, p);
	//	cout<<endl<<"[This thread is: "<<omp_get_thread_num()<<"]"<<endl;

	// Per-feature (individual) priors, if supplied, override the common
	// mode/skew/variance for the features they mention.
	if(m_individualPriorsHolder!=0) 
	{
		cout << "take care of m_individualPriors ..." << endl;
		m_individualPriorsHolder->reset();
		int j;
		double final_variance;
		for(j=0; j<nDesignVars; j++)
		{
			// type: 0 - not found; 1 - feature-level; 2 - coefficient-level; 
			int type;
			ModeVarSkew p = m_individualPriorsHolder->hasIndPrior(j,0,type);

			// if not found, continue
			if(type==0) continue;

			// if the prior is specified in indprior file, overwrite the current value
			m_priorMode[omp_get_thread_num()](j,0) = p.mode;
			m_priorSkew[omp_get_thread_num()](j,0) = p.skew;

			// p.abs selects an absolute variance; otherwise p.var scales the common one
			final_variance = type==0 ? common_variance : p.abs ? p.var : common_variance*p.var;

			// penalty(j,k) is multiplier of the beta portion of the actual penalty
			if(Normal == bayesParam.getPriorType()) 
			{
				// analogous to 1/(2*tau_j) from Equation 8 of GLM2007
				m_penalty[omp_get_thread_num()](j,0) = 1 / (2 * final_variance); //'2*' fixes bug, ver2.01
			}
			else if(Laplace == bayesParam.getPriorType()) 
			{
				// analogous to lambda_j from Equation 8 of GLM2007
				double lambda = sqrt(2.0) / sqrt(final_variance); 
				m_penalty[omp_get_thread_num()](j,0) = lambda; 
			}
			else
			{
				throw runtime_error("ZO only allows Normal or Laplace prior");
			}
		}
	}

	//Vector trustregion( 1.0, nDesignVars );
	Vector trustregion( 1.0, nDesignVars );
	unsigned ndropped;

	// WakeUpPlan: coefficients sitting at their prior mode are re-examined
	// only on square-numbered sleep counts (then every 100th) — see the
	// comment block above this function.
	vector<unsigned> sleep( nDesignVars, 0 );
	class WakeUpPlan{
		vector<bool> plan;        
	public:
		bool operator()(unsigned int step ) {
			if( step < plan.size() ) 
				return plan.at(step);
			else 
				return 0==step%100;
		}
		WakeUpPlan(unsigned size=1000) : plan( size+1, false ) //ctor
		{
			for(unsigned i=0;i*i<size;i++) plan[i*i] = true;     
		}
	};
	WakeUpPlan wakeUpPlan;	

	unsigned iterLimit = m_modelType.getIterLimit();		//JQC: command line option
	double thrConverge = m_modelType.getConvergenceThreshold();	//JQC: use this instead  

	m_alpha[omp_get_thread_num()] = m_modelType.getAlphaInit();
	//	cout << "init alpha=" << m_alpha << endl;
	//m_alpha = 0.5*sqrt(( sumLTsq - sumLT*sumLT/lifeTime.size() )/lifeTime.size());
	//m_alpha=0.353;

	time_t t1 = time(NULL);

	// d = number of uncensored (event) observations
	double d=0;
	//don't know if this is necessary
	//for (int i=0; i < NUM_ROW; i++) 	
	//score[i]=0;
	for (int i=0; i < nRows; i++) 
	{ 
		d += censorInd[i];
	}
	time_t t2 = time(NULL);
	//	cout << " ...spent " << t2 - t1 << " to get design matrix..." << endl;

	time_t t3 = time(NULL);	
	int n1coordsteps=0, ndoubltries=0, nslept=0;

	// Initialize the linear scores wTX = X*beta from any non-zero betas
	// (all zero here given the initialization above, so wTX starts at 0).
	Vector wTX( 0.0, nRows );
	for( unsigned j = 0; j < nDesignVars; j++ ) 
	{
		if( beta[j] != 0.0 ) 
		{
			const InvData::pair_range_type range = invData.getRange(j);
			for( InvData::pair_vector_type::const_iterator ix=range.first; ix!=range.second; ix++ ) 
			{
				wTX[ ix->first ] += beta[j] * ix->second;
				//cout << "i=" << ix->first << ",j=" << j << ",x=" << ix->second << endl;
			}
		}
	}

	Vector wTXprev = wTX;
	bool converge = false;
	unsigned k;

	for( k=0; !converge; k++ ) 					//---iterations loop--
	{
		vector<double> wPrev = beta;
		//double dot_product_total_change = 0.0;
		ndropped = 0;

		// --- shape-parameter (alpha) update: accumulate the first (tmpNum)
		// and second (tmpDen) derivative of the log-likelihood w.r.t. alpha
		// for the current model, then take a damped Newton step below. ---
		double tmpNum=0, tmpDen=0;
		if(modelIndex == Weibull)
		{		
			tmpNum = -d/m_alpha[omp_get_thread_num()];
			tmpDen = d/(m_alpha[omp_get_thread_num()]*m_alpha[omp_get_thread_num()]);
			for (int i=0; i < NUM_ROW; i++) 
			{
				double tmp1 = -censorInd[i]*log(lifeTime[i])+pow(lifeTime[i], m_alpha[omp_get_thread_num()])*exp(-wTX[i])*log(lifeTime[i]);
				// tmpNum += -censorInd[i]*log(lifeTime[i])+pow(lifeTime[i], m_alpha)*exp(-wTX[i])*log(lifeTime[i]);
				tmpNum += tmp1;

				double tmp2 = pow(lifeTime[i], m_alpha[omp_get_thread_num()])*log(lifeTime[i])*log(lifeTime[i])*exp(-wTX[i]);
				// tmpDen += pow(lifeTime[i], m_alpha)*log(lifeTime[i])*log(lifeTime[i])*exp(-wTX[i]);
				tmpDen += tmp2;

				if(_isnan(tmp1)) 
				{
					cout << "For i=" << i << ",censorInd=" << censorInd[i] << ",lifetime=" << lifeTime[i] << ",wTX=" << wTX[i] << endl;
				}
			}	
		}
		else if (modelIndex == Loglogistic)
		{
			tmpNum = -d/m_alpha[omp_get_thread_num()];
			tmpDen = d/(m_alpha[omp_get_thread_num()]*m_alpha[omp_get_thread_num()]);
			for (int i=0; i < NUM_ROW; i++) 
			{
				double logTime = log(lifeTime[i]);
				double y = pow(lifeTime[i], m_alpha[omp_get_thread_num()]);
				double lambda = exp(-wTX[i]);
				tmpNum += -censorInd[i]*logTime + (1+censorInd[i])*lambda*y*logTime/(1+lambda*y);
				tmpDen += (1+censorInd[i])*lambda*y*logTime*logTime/((1+lambda*y)*(1+lambda*y));
			}	
		}
		else if (modelIndex == Lognormal)
		{	
			tmpNum = d/m_alpha[omp_get_thread_num()];
			tmpDen = -d/(m_alpha[omp_get_thread_num()]*m_alpha[omp_get_thread_num()]);
			for (int i=0; i < NUM_ROW; i++) 
			{
				double z = (log(lifeTime[i]) - wTX[i])/m_alpha[omp_get_thread_num()];
				// terms with phi(z) ~ 1 are excluded (negligible likelihood contribution)
				if (phi(z) < 0.9999) 
				{
					double h = pdfNorm(z)/(1-phi(z));
					tmpNum += -censorInd[i]*z*z/m_alpha[omp_get_thread_num()] - (1-censorInd[i])*z*h/m_alpha[omp_get_thread_num()];
					tmpDen += 3*censorInd[i]*z*z/(m_alpha[omp_get_thread_num()]*m_alpha[omp_get_thread_num()])
						- (1-censorInd[i])*z*h*(z*z-2)/(m_alpha[omp_get_thread_num()]*m_alpha[omp_get_thread_num()])
						+ (1-censorInd[i])*z*z*h*h/(m_alpha[omp_get_thread_num()]*m_alpha[omp_get_thread_num()]);
				}
			}
		}
		// Sushil
		//else if (modelIndex == 5) {	//Gamma
		//	int ifault;
		//	tmpNum = d/tgamma(m_alpha)*dgamma(m_alpha);
		//	tmpDen = -d/(tgamma(m_alpha)*tgamma(m_alpha))*dgamma(m_alpha)*dgamma(m_alpha)+d/tgamma(m_alpha)*ddgamma(m_alpha);
		//	for (int i=0; i < NUM_ROW; i++) {
		//		double tmpArg =  lifeTime[i]*exp(-wTX[i]);
		//		double tmpIG = 1-gammad(tmpArg, m_alpha, &ifault); 
		//		tmpNum += censorInd[i]*wTX[i] - censorInd[i]*log(lifeTime[i])+ (1-censorInd[i])*dIG(m_alpha, tmpArg, 1)/tmpIG;
		//		tmpDen += (1-censorInd[i])*ddIG(m_alpha, tmpArg, 1)/tmpIG
		//			+(1-censorInd[i])*dIG(m_alpha, tmpArg,1)*dIG(m_alpha, tmpArg,1)/(tmpIG*tmpIG);
		//	}	

		//}
		if (modelIndex == Exponential) 
			m_alpha[omp_get_thread_num()] = 1;		//a dummy
		else 
		{
			// Damped Newton step on alpha: halve alpha rather than let the
			// step drive it to zero or negative.
			double step = tmpNum/tmpDen;
			if ( m_alpha[omp_get_thread_num()] <= step ) 
			{ 
				m_alpha[omp_get_thread_num()] = 0.5 * m_alpha[omp_get_thread_num()];
			}
			else 
			{
				m_alpha[omp_get_thread_num()] = m_alpha[omp_get_thread_num()] - step;
			}

			//if (alpha < 0.105 || alpha > 1.05 ) {
			//	alpha = 0.105;
			//}	
			//m_alpha = 0.662;
			//			cout << "tmpNum=" << tmpNum << ", tmpDen=" << tmpDen << ", alpha=" << m_alpha << endl;
		}

		// --- one coordinate-descent pass over all design variables ---
		for( unsigned j=0; j < nDesignVars; j++ ) 				//--design vars--
		{
			double dw, dw_numerator, dw_denominator;
			const InvData::pair_range_type range = invData.getRange(j);	//get loop of subjects for j

			// JQC: just to verify, when m_penalty is set to 0, the result matches survreg() in R
			//m_penalty(j, 0) = 0;	
			if(Normal == bayesParam.getPriorType()) 
			{
				//JQC: what are these initializations for ?
				dw_numerator = (beta[j]==m_priorMode[omp_get_thread_num()](j,0)) ? 0.0  //in case of infinite penalty // ver 2.57
					: - 2 * ( beta[j] - m_priorMode[omp_get_thread_num()](j,0) ) * m_penalty[omp_get_thread_num()](j,0);

				dw_denominator = 2 * m_penalty[omp_get_thread_num()](j,0);
				//JQC: compute tentative step (eq 18 GLM) for the jth param
				pair<double,double> numer_denom = OneCoordStep(range, wTX, trustregion[j],lifeTime, censorInd, modelIndex, m_alpha[omp_get_thread_num()]);
				dw_numerator += numer_denom.first;
				dw_denominator += numer_denom.second;

				//update w
				dw = dw_numerator / dw_denominator;
				dw = min(max(dw,-trustregion[j]),trustregion[j]);
			}
			else //Laplace
			{ 
				// Skip coefficients parked at the prior mode unless the
				// wake-up plan says to re-examine them this round.
				if(beta[j] != m_priorMode[omp_get_thread_num()](j,0) || wakeUpPlan(sleep[j]))
				{
					pair<double,double> numer_denom = OneCoordStep(range, wTX, trustregion[j], lifeTime, censorInd, modelIndex, m_alpha[omp_get_thread_num()]);

					n1coordsteps++;
					//					cout<< "n1coordsteps = "<<n1coordsteps <<endl;
					if(beta[j] - m_priorMode[omp_get_thread_num()](j,0) > 0) 
					{
						dw = (numer_denom.first - m_penalty[omp_get_thread_num()](j,0)) / numer_denom.second;
					}
					else if(beta[j] - m_priorMode[omp_get_thread_num()](j,0) < 0) 
					{
						dw = (numer_denom.first + m_penalty[omp_get_thread_num()](j,0)) / numer_denom.second;
					}
					else // at mean right now
					{ // try both directions
						dw = 0;
						if(m_priorSkew[omp_get_thread_num()](j,0) > -1) //positive allowed
						{ 
							double dwPlus = (numer_denom.first - m_penalty[omp_get_thread_num()](j,0)) / numer_denom.second;
							if(dwPlus > 0)
								dw = dwPlus;
						}
						if(dw == 0) //positive step not allowed or unsuccessful
						{
							if(m_priorSkew[omp_get_thread_num()](j,0) < 1) //negative allowed
							{ 
								double dwMinus = (numer_denom.first + m_penalty[omp_get_thread_num()](j,0)) / numer_denom.second;
								if(dwMinus < 0)
									dw = dwMinus; //otherwise dw stays at 0
							}
						}
					}
				}
				else 
				{
					std::cout << "SLEEP" << wakeUpPlan(sleep[j]) << nslept << "\n";
					nslept++;//Log(9)<<"\nNo Wake-up j/sleep[j] "<<j<<"/"<<sleep[j]
				}	

				//				if (modelIndex != Lognormal)		//don't do this for log-normal yet ! 2/6/2011 
				dw = min( max(dw,-trustregion[j]), trustregion[j] ); //for Normal prior this is done in OneCoordStep

				//check if we crossed the break point
				if((beta[j] < m_priorMode[omp_get_thread_num()](j,0) 
					&& m_priorMode[omp_get_thread_num()](j,0) < beta[j]+dw) 
					|| (beta[j]+dw < m_priorMode[omp_get_thread_num()](j,0) 
					&& m_priorMode[omp_get_thread_num()](j,0) < beta[j]))
					dw = m_priorMode[omp_get_thread_num()](j,0) - beta[j]; //stay at mean

			} //Laplace
			if(dw > 1000000 || dw < -1000000)
				cout << "Problem!\n";
			beta[j] += dw;
			if(beta[j] == m_priorMode[omp_get_thread_num()](j,0))   
				ndropped++;

			//update local data
			if (dw != 0) 
			{
				// propagate the step into the linear scores for the cases
				// where feature j is non-zero
				for(InvData::pair_vector_type::const_iterator ix = range.first; ix != range.second; ix++)
				{
					unsigned i = ix->first;
					double x = ix->second;
					//if( 0.0==x ) continue; //just in case - shouldn't ever hit this
					//double old = wTX[i];
					wTX[i] += dw * x;
					//dot_product_total_change += fabs(dw * x); 
				}
			}

			//trust region update
			trustregion[j] = max(2*fabs(dw), trustregion[j]/2);
		}//--design vars-- j --

		// Convergence: relative change in the linear scores (ZO rule), or
		// the iteration limit.
		double sum_abs_r = 0;
		double sum_abs_dr = 0.0;
		for(unsigned i = 0; i < nRows; i++) 
		{
			sum_abs_r += fabs(wTX[i]);
			//ZO stopping
			sum_abs_dr += fabs(wTX[i]-wTXprev[i]);
		}	
		//double dot_product_rel_change = dot_product_total_change/(1+sum_abs_r);	//DL stopping
		double rel_sum_abs_dr = sum_abs_dr/(1+sum_abs_r);			//ZO stopping

		converge = (k >= iterLimit || rel_sum_abs_dr<thrConverge);		//ZO stopping
		//DL stopping || dot_product_rel_change<thrConverge );

		std::cout.setf(std::ios::fixed);
		std::cout.precision(4);

		Log(5)<<"ZO iteration "<<k+1
			<<"\t\tDot product abs sum "<<sum_abs_r
			<<"\t\tRel change "<<rel_sum_abs_dr //ZO stopping //DL stopping dot_product_rel_change
			<<"\t\tBeta relative increment = "<<normdiff(beta,wPrev)/norm(wPrev)
			<<"\t\tBeta components dropped: "<<ndropped
			<<"\t\tTime "<<Log.time()<<endl;
		wTXprev = wTX;
	}//---iter loop---

	time_t t4 = time(NULL);
	//	cout << "...spent " << t4-t3 << " in the loop..." << endl;

	Log(3)<<"1-coord steps: "<<n1coordsteps<<"  Double tries: "<<ndoubltries<<" Slept: "<<nslept;
	Log(3)<<std::endl<<( k>=iterLimit ? "Stopped by iterations limit"  : "Stopped by original ZO rule" );
	Log(3)<<std::endl<<"Built ZO model "<<k<<" iterations, Time "<<Log.time() << endl; 
	Log(0).flush();

	time_t t11 = time(NULL);
	//	cout << "...for Laplace prior: spent " << elapsedTime << " seconds " << endl;
	//	cout << "...end of ZOLRBinary(): spent " << t11 -t1 << " seconds " << endl;

	// only print for single prior variance with a specific model
	//	if (modelIndex != -1 && singleVar) 
	//	{
	//		cout << "Final ... non-zero alpha = " << m_alpha << endl;
	//		for(unsigned j = 0; j < nDesignVars; j++) 
	//		{
	//			if (beta[j] != 0)
	//				cout << "beta[" << j << "]=" << beta[j] << endl;
	//		}	
	//	}

	//cout << "calling LogLikelihood() ...." << endl;
	//double eval = LogLikelihood(rowSet, beta, m_alpha, modelIndex, lifeTime, censorInd);
	//cout << "==>Model=" << modelIndex << ": log likelihood is " << eval << endl;
}

// Builds a cumulative (prefix-sum) occurrence index over the sample:
// after the second loop, wordIndex[j] holds the total number of feature
// occurrences with feature id < j (slot 1+f accumulates counts for
// feature f, slot 0 stays zero before the prefix sum).
// NOTE(review): the first loop assumes drs.xsparse() returns a reference
// that tracks drs.next() — confirm against RowSetIterator's contract.
vector<int> LRModel::buildWordIndexForSample(RowSetIterator &drs, unsigned dim) const 
{
	drs.rewind();

	// per-feature occurrence counts, shifted by one slot
	vector<int> wordIndex = vector<int>(dim+1, 0);
	for (const SparseVector& v = drs.xsparse(); drs.next(); ) 
	{
		for (SparseVector::const_iterator i = v.begin(); i < v.end(); i++) 
		{
			wordIndex[1 + i->first]++;
		}
	}

	// convert the counts into cumulative offsets in place
	int runningsum = 0;
	for (vector<int>::iterator i = wordIndex.begin(); i < wordIndex.end(); i++) 
	{
		runningsum += *i;
		*i = runningsum;
	}

	// leave the iterator rewound for the caller's subsequent scan
	drs.rewind();

	return wordIndex;
}

/* Perform loop of hyperparameter values for a given train/validation split */
vector<double>* LRModel::hyperParamLoop(MemRowSetIterator& drs,
	unsigned dim, 
	unsigned classes,
	MemRowSetIterator& drsTest,
	ModelType modelType,
	const HyperParamPlan& hpPlan,
	const class FixedParams& fixedParams,
	vector<double> lifeTimeTrain, vector<double> lifeTimeTest,
	vector<int> censorIndTrain, vector<int> censorIndTest, int modelIndex)

{  
	vector<double>* lglkli = new vector<double>();

	unsigned planSize = hpPlan.getPlan().size();

	vector<ParamMatrix> localbeta(planSize);
	for(int i = 0; i < planSize; i++)
		localbeta[i] = ParamMatrix(dim, classes);

	InvData* p_invData = 0;
	if (modelType.getOptimizerType() == ModelType::ZO) 
	{
		vector<int> wordIndex = buildWordIndexForSample(drs, dim);
		p_invData = new InvData(drs, classes, dim, wordIndex);
	}

	vector<BayesParameter> localBayesParam(planSize);
	lglkli->resize(planSize);

	MemRowSetIterator drsIterator = MemRowSetIterator(drs);
	int nThreads = omp_get_max_threads();
	omp_set_num_threads(nThreads);
	cout<<"Total Number of threads = "<<nThreads<<endl;
	int iparam;
#pragma omp parallel for
	for(iparam=0; iparam<planSize; iparam++)
	{
		//			Log(5)<<"\nHyperparameter plan #"<<iparam+1<<" value="<<(hpPlan.getPlan()[iparam]);
		localBayesParam[iparam] = hpPlan.getInstBayesParam(iparam);

		// build the model
		ZOLRBinary(drsIterator, *p_invData, localBayesParam[iparam],
			const_cast<vector<bool>& >(fixedParams.classparam(0)),
			const_cast<vector<double>& >(localbeta[iparam].classparam(0)),
			lifeTimeTrain, censorIndTrain, modelIndex);
		cout<<endl<<"[This thread is: "<<omp_get_thread_num()<<" iparam = "<<iparam<<"]"<<endl;
		//		cout << "after ZOLBBinary()... " << endl;

		//double eval = LogLikelihood( drsTest, classes, modelType, localbeta );
		// TEMP: this does not work, so passing in test set for lifetime and censorInd directly 
		//MemRowSet designX = drsTest.getRowSet();
		//vector<double> lifeTime = designX.getLifeTime();


		(*lglkli)[iparam] = 0;
#pragma omp critical
		(*lglkli)[iparam] = LogLikelihoodPenalty(drsTest, localbeta[iparam].classparam(0), 
			localBayesParam[iparam], m_penalty[omp_get_thread_num()], m_alpha[omp_get_thread_num()], 
			modelIndex, lifeTimeTest, censorIndTest);
	}
	/*		cout<<"\BETAS:\n";
	for(int i = 0; i < planSize; i++)
	{
	double maxBeta = -10000000000;
	vector<double> beta = const_cast<vector<double>&>(localbeta[i].classparam(0));
	for(int j = 0; j < (int)beta.size(); j++)
	{
	cout<<beta[j]<<" ";
	if(abs(beta[j]) > maxBeta)
	maxBeta = abs(beta[j]);
	}
	cout<<"\n";
	}
	cout<<"\n";
	for(int i = 0; i < planSize; i++)
	{
	vector<double> penal = const_cast<vector<double>&>(m_penalty[omp_get_thread_num()].classparam(0));
	for(int j = 0; j < (int)penal.size(); j++)
	{
	cout<<penal[j]<<" ";
	}
	cout<<"\n";
	}
	*/
	delete p_invData;
	return lglkli;
}


// Sum over feature columns of mean^2 + stddev^2, excluding the constant
// term in column 0 (which is why this is separated from RowSetStats).
double LRModel::AvgSquNorm(RowSetStats& stats) const //separated from class Stats' to exclude constant term
{
	double total = 0.0;
	// start at 1: column 0 is the constant term and is deliberately skipped
	for(unsigned j = 1; j < stats.Means().size(); j++) 
	{
		total += stats.Means()[j]*stats.Means()[j] + stats.Stddevs()[j]*stats.Stddevs()[j];
		Log(16)<<"AvgSquNorm: "<<stats.Means()[j]<<";"<<stats.Stddevs()[j]<<endl;
	}
	return total;
}

void LRModel::tuneModel(MemRowSetIterator & drs, MemRowSet& testData, const vector<int>& rowIndex, const HyperParamPlan& hyperParamPlan,
	const class FixedParams& fixedParams, RowSetStats& stats, map<int,int>& origFeatureIndex, vector<string> keys, string modelFileName)

{
	int modelIndex = m_modelType.getModelIndex();
	unsigned startModelIndex = modelIndex;
	unsigned endModelIndex = modelIndex;
	if (modelIndex == -1) 
	{		//option to try all models 
		startModelIndex = 1;
		endModelIndex = 4;
	}

	//	cout << "got modelindex=" << modelIndex << endl;

	map<int,int> reverseFeatIndex;
	map<int,int>::iterator it = origFeatureIndex.begin();
	for(; it != origFeatureIndex.end(); it++){
		reverseFeatIndex.insert(pair<int,int>(it->second,it->first));
	}

	// build the model
	if(!hyperParamPlan.hasGrid()) 
	{    
		//		cout << " does not have grid.. " << endl;
		singleVar = true;
		BayesParameter localBayesParam;
		if(hyperParamPlan.getAutoHPar()) 
		{
			//			cout << "getAutoHPar() is yes" << endl;
			double avgsqunorm = AvgSquNorm(stats)/(double)drs.dim();    // v3.21
			double hpar = Normal == hyperParamPlan.getPriorType() ? 1.0/avgsqunorm : sqrt(avgsqunorm*2.0);

			localBayesParam = BayesParameter(hyperParamPlan.getPriorType(), hpar, hyperParamPlan.getSkew());

			Log(3)<<"\nAverage square norm (no const term) "<<avgsqunorm<<" Hyperparameter "<<hpar<<endl;
		}
		else if(hyperParamPlan.isFixed()) 
		{
			cout << "hyperParamPlan.isFixed" << endl;
			localBayesParam = hyperParamPlan.getBp();
		}
		else throw logic_error("Inconsistent 'HyperParamPlan' setting");

		MemRowSetIterator& drsIterator = drs;
		InvData invData(drsIterator, drs.c(), drs.dim(), rowIndex);

		for (int tmpIndex = startModelIndex; tmpIndex <= endModelIndex; tmpIndex++) 
		{
			ZOLRBinary(drsIterator, invData, localBayesParam, const_cast<vector<bool>&>(fixedParams.classparam(0)), const_cast<vector<double>&>(m_beta.classparam(0)), tmpIndex);
//			ReadBetaFromFile(modelFileName,const_cast<vector<double>&>(m_beta.classparam(0)),m_alpha[omp_get_thread_num()],origFeatureIndex,reverseFeatIndex);
			test(testData,tmpIndex);
			
			m_bayesParam = localBayesParam;
			MemRowSet designX = drsIterator.getRowSet();
			std::ostringstream thisModelFile;
			switch(tmpIndex){
			case Exponential:
				thisModelFile << "Exponential_" << modelFileName;
				break;
			case Weibull:
				thisModelFile << "Weibull_" << modelFileName;
				break;
			case Loglogistic:
				thisModelFile << "Loglogistic_" << modelFileName;
				break;
			case Lognormal:
				thisModelFile << "Lognormal_" << modelFileName;
				break;
			}
			m_beta.displayModel( Log(3), designX, origFeatureIndex, reverseFeatIndex, keys, thisModelFile.str(), m_alpha[omp_get_thread_num()]);
		}
	}
	else		//model averaging/selection
	{    
		//		cout << "Performing cross validation... " << endl;
		singleVar = false;

		unsigned planSize = hyperParamPlan.getPlan().size();
		vector<double> lglkliMean(planSize, 0.0);
		vector<double> lglkliMeanSqu(planSize, 0.0);
		const double randmax = RAND_MAX;

		const std::vector<double>& hpplan = hyperParamPlan.getPlan();

		//prepare CV
		unsigned nfolds = hyperParamPlan.getNfolds(); //10
		if(nfolds > drs.n()) 
		{
			nfolds = drs.n();
			Log(1)<<"\nWARNING: more folds requested than there are data. Reduced to "<<nfolds;
		}
		unsigned nruns = hyperParamPlan.getNruns(); //2
		//		cout << "nfolds=" << nfolds << ", drs.n()=" << drs.n() << ", nruns=" << nruns << endl;

		if(nruns > nfolds)  
			nruns = nfolds;

		srand ( time(NULL) );
		RowSetSampler sampler = RowSetSampler(drs, nfolds);
		//		cout << "calling sampling.printIndex()..." << endl;
		sampler.printIndex();

		//This loops through all the models: eg exponential, weibull, log-logistic, lognormal 
		for (int tmpIndex = startModelIndex; tmpIndex <= endModelIndex; tmpIndex++) 
		{
			vector<bool> foldsAlreadyRun(nfolds, false);
			//cv loop
			for(unsigned irun = 0; irun < nruns; irun++)
			{
				//next fold to run
				unsigned x = unsigned(rand()*(nfolds-irun)/randmax);
				unsigned ifold, y = 0;
				for(ifold = 0; ifold < nfolds; ifold++)
					if(!foldsAlreadyRun[ifold]) 
					{
						if(y == x) 
							break;
						else 
							y++;
					}
					foldsAlreadyRun[ifold] = true;
					Log(5) << "----------------------------------------------------------------------------------------------------------------------------------------------\n";
					Log(5)<<"\nCross-validation "<<nruns<<"-fold; Run "<<irun+1<<" out of "<<nruns<<", fold "<<ifold << endl;

					//training
					MemRowSetIterator* cvtrain = sampler.getSample(ifold, false);
					MemRowSetIterator* cvtest = sampler.getSample(ifold, true); 
					vector<double> lifeTimeTrain, lifeTimeTest;
					vector<int> censorIndTrain, censorIndTest; 
					sampler.getSampleSurvivalVars(lifeTimeTrain, lifeTimeTest, censorIndTrain, censorIndTest, ifold);
					//					cout << "size of train=" << lifeTimeTrain.size() << ", size of testing=" << lifeTimeTest.size() << endl;
					Log(3)<<"Data Sizes: Train: " << (int)lifeTimeTrain.size() << ", Test: " << (int)lifeTimeTest.size() << endl;
					vector<double>* loglikelihoods = hyperParamLoop(*cvtrain, drs.dim(), drs.c(), *cvtest, //invData,
						m_modelType, hyperParamPlan, //bayesParameter, hpplan, 
						fixedParams, lifeTimeTrain, lifeTimeTest, 
						censorIndTrain, censorIndTest, tmpIndex);
					Log(5)<<"\nCross-validation test log-likelihood: ";

					for(unsigned iparam = 0; iparam < planSize; iparam++) 
					{
						double eval = (*loglikelihoods)[iparam];
						Log(5)<<eval<<", ";
						//arith ave of loglikeli, i.e. geometric mean of likelihoods
						lglkliMean[iparam] = lglkliMean[iparam]*irun/(irun+1) + eval/(irun+1);
						lglkliMeanSqu[iparam] = lglkliMeanSqu[iparam]*irun/(irun+1) + eval*eval/(irun+1);
					}
					Log(5)<<"\n";

					delete loglikelihoods;
					delete cvtrain;
					delete cvtest;
			}//cv loop

			vector<double> lglkliStdErr; //( hpplan.size(), 0.0 );
			for(unsigned i = 0; i < planSize; i++)
				lglkliStdErr.push_back(sqrt(lglkliMeanSqu[i] - lglkliMean[i]*lglkliMean[i]) / sqrt(double(nruns)) ); // stdev/sqrt()

			// best by cv
			double bestEval = - numeric_limits<double>::max();
			unsigned bestParam = unsigned(-1);
			Log(5) << "----------------------------------------------------------------------------------------------------------------------------------------------\n";
			Log(5) << "----------------------------------------------------------------------------------------------------------------------------------------------\n";
			Log(5)<<"\nCross-validation results - hyperparameter values, "
				<<(Laplace==hyperParamPlan.getPriorType()?"prior var, ":"")
				<<"cv mean loglikelihood, std.error:";

			for(unsigned i = 0; i < planSize; i++) 
			{
				if(lglkliMean[i] > bestEval) 
				{
					bestEval = lglkliMean[i];
					bestParam = i;
				}
				Log(5)<<"\n"<<setw(15)<<hpplan[i];

				if(Laplace == hyperParamPlan.getPriorType()) 
					Log(5)<<setw(15)<<hyperpar2var(hyperParamPlan.getPriorType(),hyperParamPlan.getSkew(),hpplan[i]);

				Log(5)<<setw(15)<<lglkliMean[i]<<setw(15)<<lglkliStdErr[i];
			}
			if(bestParam == unsigned(-1))
				throw runtime_error("No good hyperparameter value found");

			if(hyperParamPlan.isErrBarRule()) 
			{
				double valueneeded = lglkliMean[bestParam] - lglkliStdErr[bestParam];
				unsigned oldBestParam = bestParam;
				for(unsigned i = 0; i < planSize; i++)
				{
					if(lglkliMean[i] >= valueneeded) 
					{
						bestParam = i;
						break;
					}
				}
				Log(5)<<"\nError bar rule selected hyperparameter value "<<hpplan[bestParam]<<" instead of "<<hpplan[oldBestParam];
			}

			//build final model
			m_bayesParam = hyperParamPlan.getInstBayesParam(bestParam);

			Log(5)<<"\nBest hyperparameter value "<<hpplan[bestParam]<<", Best variance = " << m_bayesParam.getPriorVar() <<", cv mean loglikely "<<bestEval;

			Log(5)<<"\n\nStarting final model after cv, Time "<<Log.time()<<",";
			MemRowSetIterator drsIterator = MemRowSetIterator(drs);
			InvData invData(drsIterator, drs.c(), drs.dim(), rowIndex);

			//ZOLRBinary(drsIterator, invData, m_bayesParam, 
			//const_cast<vector<bool>& >(fixedParams.classparam(0) ),
			//beta, tmpIndex );

			ZOLRBinary(drsIterator, invData, m_bayesParam,
				const_cast<vector<bool>& >(fixedParams.classparam(0)),
				const_cast<vector<double>& >(m_beta.classparam(0)),tmpIndex);

			vector<double> beta = const_cast<vector<double>&>(m_beta.classparam(0));
			//			cout << "calculating AIC/BIC ...." << endl;
			MemRowSet designX = drsIterator.getRowSet();
			vector<double> lifeTime = designX.getLifeTime();
			vector<int> censorInd = designX.getCensorInd();

			double eval = LogLikelihood(drsIterator, beta, m_alpha[omp_get_thread_num()], tmpIndex, lifeTime, censorInd);
			int k=2;
			if(tmpIndex == Exponential)	
				k=1;

			cout << "After CV: Model=" << tmpIndex << ": log likelihood=" << eval << ", AIC=" << -2*eval + 2*k << ", BIC=" << -2*eval + k*log((double)lifeTime.size()) << endl;

			// report model
			Log(3)<<std::endl<<"Beta:***";
			std::ostringstream thisModelFile;
			switch(tmpIndex){
			case Exponential:
				thisModelFile << "Exponential_" << modelFileName;
				break;
			case Weibull:
				thisModelFile << "Weibull_" << modelFileName;
				break;
			case Loglogistic:
				thisModelFile << "Loglogistic_" << modelFileName;
				break;
			case Lognormal:
				thisModelFile << "Lognormal_" << modelFileName;
				break;
			}
			
			m_beta.displayModel( Log(3), designX, origFeatureIndex, reverseFeatIndex, keys, thisModelFile.str(),m_alpha[omp_get_thread_num()]);
			test(testData,tmpIndex);
		}
	}//model averaging/selection
}

// Trains the survival model on trainData (selecting hyperparameters via
// tuneModel) and evaluates it on testData.  origFeatureIndex maps internal
// feature indices to original feature ids; keys/modFileName control the
// model-file output.  Side effects: initializes m_beta, the per-thread
// prior/penalty matrices and m_alpha, then delegates to tuneModel which sets
// m_beta and m_bayesParam.
void LRModel::trainTest(MemRowSet& trainData, MemRowSet& testData, map<int,int>& origFeatureIndex, vector<string> keys, string modFileName)
{
	m_lastDiff = vector<double>(trainData.n(), 0);

	const BayesParameter& bayesParameter = m_hyperParamPlan.getBp();
	m_bTrained = false;

	MemRowSet& drs = trainData;

	// One "class" for survival (Cox-style) regression; BBR used drs.c().
	int numClasses = 1;
	m_beta = ParamMatrix(drs.dim(), numClasses);

	// Per-thread working copies for OpenMP.
	m_priorMode.resize(omp_get_max_threads());
	m_priorSkew.resize(omp_get_max_threads());
	m_penalty.resize(omp_get_max_threads());
	m_alpha.resize(omp_get_max_threads(), 0.0);

	for(int i = 0; i < omp_get_max_threads(); i++)
	{
		m_priorMode[i] = ParamMatrix(drs.dim(), numClasses);
		m_priorSkew[i] = ParamMatrix(drs.dim(), numClasses, m_hyperParamPlan.getSkew());
		m_penalty[i] = ParamMatrix(drs.dim(), numClasses);
	}

	// Fixed parameters are not actually used; a size-1 dummy (rather than an
	// empty one) keeps the code easy to debug.
	const vector<vector<bool> > dummy(1, vector<bool>(1, false));

	int classid = m_metadata.getRefClassId();
	FixedParams fixedParams(m_modelType.isAllZeroClassMode() ? trainData.allzeroes() : dummy,
		m_individualPriorsHolder, classid, drs.dim(), numClasses);

	Log(3)<<"\nTotal parameters: "<<drs.dim()*numClasses<<"\t Of them fixed: "<<fixedParams.count()<<endl;

	// m_beta initialization from the init file, if any.
	InitModel initfilemodel(m_initFName, m_metadata, bayesParameter, fixedParams, *m_individualPriorsHolder, m_beta);

	// Stack allocation replaces the previous heap-allocated iterator that was
	// never deleted (memory leak); the unused test-data iterator and the
	// unused drs.iterator() (created only to be deleted) are gone as well.
	MemRowSetIterator workingRs(drs);
	RowSetStats stats(drs);

	// tuneModel sets m_beta and m_bayesParam.
	tuneModel(workingRs, testData, trainData.getWordIndex(), m_hyperParamPlan, fixedParams, stats, origFeatureIndex, keys, modFileName);
}

// Evaluates the current model (m_beta, m_alpha) on held-out data: test
// log-likelihood with AIC/BIC, Harrell's C-index, Hosmer-Lemeshow-style
// chi-squared at several bin counts, and a low/medium/high risk-group
// summary.  All results are printed to stdout.
void LRModel::test(MemRowSet& testData, unsigned modelIndex)
{
	MemRowSetIterator drsIt = MemRowSetIterator(testData);
	vector<double> beta = const_cast<vector<double>&>(m_beta.classparam(0));
	vector<double> lifeTime = testData.getLifeTime();
	vector<int> censorInd = testData.getCensorInd();
	int nPatients = (int)lifeTime.size();

	// k = number of distribution parameters for AIC/BIC; Exponential has only
	// lambda, the other models add a shape/scale.  (Was the magic number 1;
	// the enum constant matches how the same check is spelled in tuneModel.)
	int k = 2;
	if(modelIndex == Exponential)
		k = 1;

	double eval = LogLikelihood(drsIt, beta, m_alpha[omp_get_thread_num()], modelIndex, lifeTime, censorInd);

	// Harrell's C-Index (discrimination)
	double cIndex = ComputeCIndex(testData, modelIndex);

	// Hosmer-Lemeshow calibration index at several bin counts
	vector<double> xisquared;
	vector<int> nbins;
	nbins.push_back(10); nbins.push_back(50); nbins.push_back(100); nbins.push_back(500); nbins.push_back(1000);
	ComputeHLStatistics(testData, modelIndex, xisquared, nbins);

	cout<<endl<<endl<<"TEST DATA STATISTICS:\n";
	cout<<"Total Patients = "<<nPatients<<", Likelihood = "<< eval << ", AIC = " << -2*eval+2*k << ", BIC = " << -2*eval+k*log((double)nPatients) << ", C-Index = " << cIndex;
	for(int i = 0; i < (int)xisquared.size(); i++)
		cout << ", HL Xi Squared Index(" << nbins[i] << ") = " << xisquared[i];
	cout << endl;

	// Risk-group summary (three groups by predicted risk tercile).
	vector<int> nPatientsPerGroup;
	vector<int> nDeadPerGroup;
	vector<double> meanSurvivalTime;
	ComputeAssignedRiskGroup(testData, modelIndex, nPatientsPerGroup, nDeadPerGroup, meanSurvivalTime);

	cout << std::setw(50) << "Risk Group" << std::setw(20) << "Total Patients"     << std::setw(20) << "Total Dead"     << std::setw(20) << "Mean Survival time" << endl;
	cout << std::setw(50) << "Low"        << std::setw(20) << nPatientsPerGroup[0] << std::setw(20) << nDeadPerGroup[0] << std::setw(20) << meanSurvivalTime[0]  << endl;
	cout << std::setw(50) << "Medium"     << std::setw(20) << nPatientsPerGroup[1] << std::setw(20) << nDeadPerGroup[1] << std::setw(20) << meanSurvivalTime[1]  << endl;
	cout << std::setw(50) << "High"       << std::setw(20) << nPatientsPerGroup[2] << std::setw(20) << nDeadPerGroup[2] << std::setw(20) << meanSurvivalTime[2]  << endl;
}

// Harrell's concordance index: among comparable patient pairs (patient i is
// an observed death occurring before patient j's time), the fraction where
// the model assigned patient i the higher risk.  testData is taken by value
// to preserve the existing declaration; modelIndex is unused here.
// Returns 0.5 (chance level) when there are no comparable pairs — the old
// code divided 0 by 0 and returned NaN.
double LRModel::ComputeCIndex(MemRowSet testData, unsigned modelIndex)
{
	vector<double> beta = const_cast<vector<double>&>(m_beta.classparam(0));
	vector<double> lifeTime = testData.getLifeTime();
	vector<int> censorInd = testData.getCensorInd();
	int nPatients = (int)lifeTime.size();

	// Risk score per patient: -x.beta, so a higher score means shorter
	// predicted survival.
	vector<double> risks;
	risks.reserve(nPatients);

	MemRowSetIterator it = MemRowSetIterator(testData);
	for(int i = 0; it.next(), i < nPatients; i++)
		risks.push_back(-it.xsparse().dot(beta));

	int Pcomp = 0;	// comparable pairs
	int Pconc = 0;	// concordant pairs
	for(int i = 0; i < nPatients; i++)
	{
		double riski = risks[i];
		for(int j = 0; j < nPatients; j++)
		{
			// pair (i,j) is comparable only when i != j, i's death is
			// observed, and it happened before j's time
			if(i != j && lifeTime[i] < lifeTime[j] && censorInd[i] == 1)
			{
				Pcomp++;
				// NOTE(review): risk ties count as discordant here; the
				// standard C-index scores ties as 0.5 — confirm intent
				if(riski > risks[j])
					Pconc++;
			}
		}
	}
	cout<<"Pconc = "<<Pconc<<", Pcomp = "<<Pcomp<<endl;
	if(Pcomp == 0)
		return 0.5;	// no comparable pairs: chance level instead of 0/0
	return (double)Pconc/(double)Pcomp;
}

/*void LRModel::ComputeHLStatistics(MemRowSet testData, unsigned modelIndex, vector<double>& xisquared, vector<int> nbins)
{
	vector<double> beta = const_cast<vector<double>&>(m_beta.classparam(0));
	vector<double> lifeTime = testData.getLifeTime();
	vector<int> censorInd = testData.getCensorInd();
	int nPatients = (int)lifeTime.size();

	MemRowSetIterator& it = MemRowSetIterator(testData);
	
	vector<double> risks;
	vector<double> lifeTimeDead;
	vector<int> inds_observed;
	vector<int> inds_expected;

	int count = 0;
	for(int i = 0; it.next(), i < nPatients; i++)
	{
//		if(censorInd[i] == 1)
		{
			risks.push_back(-it.xsparse().dot(beta));
			inds_observed.push_back(count);
			inds_expected.push_back(count++);
//			lifeTimeDead.push_back(-lifeTime[i]);
			lifeTime[i] = -lifeTime[i];
		}
	}

	// Resizing nPatients to only dead patients
//	nPatients = (int)lifeTimeDead.size();

	quickSort(risks,inds_observed,0,nPatients-1);
	quickSort(lifeTime,inds_expected,0,nPatients-1);
	for(int i = 0; i < nPatients; i++)
		lifeTime[i] = -lifeTime[i];


	for(int ii = 0; ii < (int)nbins.size(); ii++)
	{
		if(nPatients >= nbins[ii])
		{
			int nPatientsPerBin = (int)((double)nPatients/(double)nbins[ii] + 0.5);
			int nPatientsLastBin;
			int nbinsActual = nPatients/nPatientsPerBin;
			if(nbinsActual*nPatientsPerBin < nPatients)
			{
				nbinsActual++;
				nPatientsLastBin = nPatients - nbinsActual*nPatientsPerBin;
			}
			else
				nPatientsLastBin = nPatientsPerBin;

			vector<double> observed(nbinsActual,0.0);
			vector<double> expected(nbinsActual,0.0);

			int count = 0;
			for(int i = 0; i < nbinsActual-1; i++)
			{
				for(int j = 0; j < nPatientsPerBin; j++)
				{
					observed[i] += censorInd[inds_observed[count]];
					expected[i] += censorInd[inds_expected[count++]];
				}
				observed[i] = observed[i]/(double)nPatientsPerBin;
				expected[i] = expected[i]/(double)nPatientsPerBin;
			}
			for(int j = 0; j < nPatientsLastBin; j++)
			{
				observed[nbinsActual-1] += censorInd[inds_observed[count]];
				expected[nbinsActual-1] += censorInd[inds_expected[count++]];
			}
			observed[nbinsActual-1] = observed[nbinsActual-1]/(double)nPatientsLastBin;
			expected[nbinsActual-1] = expected[nbinsActual-1]/(double)nPatientsLastBin;

			double xisquaredTemp = 0.0;
			for(int i = 0; i < nbinsActual; i++)
			{
				if(expected[i] > 0.0)
					xisquaredTemp += pow((observed[i] - expected[i]),2)/expected[i];
			}
			xisquared.push_back(xisquaredTemp);
			if(ii == 0)
			{
				cout << endl << "Observed\t" << "Expected\t"<<endl;
				for(int i = 0; i < nbinsActual; i++)
					cout << endl << observed[i] << "\t" << expected[i] << endl;
			}
		}
	}
}*/
// Hosmer-Lemeshow-style calibration statistic.  Patients are sorted by
// predicted risk and split into nbins[ii] near-equal bins; within each bin
// the observed death count is compared with the expected count (sum of the
// patients' cumulative hazards from ComputeHazard).  One chi-squared value
// per usable bin count (nbins[ii] <= nPatients) is appended to xisquared.
// Also dumps per-patient diagnostics to "risks.txt" and
// "Observerd_Expected.txt" in the working directory.
void LRModel::ComputeHLStatistics(MemRowSet testData, unsigned modelIndex, vector<double>& xisquared, vector<int> nbins)
{
	vector<double> beta = const_cast<vector<double>&>(m_beta.classparam(0));
	vector<double> lifeTime = testData.getLifeTime();
	vector<int> censorInd = testData.getCensorInd();
	int nPatients = (int)lifeTime.size();

	MemRowSetIterator it = MemRowSetIterator(testData);

	vector<double> risks;
	vector<double> hazards;
	vector<int> inds_observed;

	for(int i = 0; it.next(), i < nPatients; i++)
	{
		risks.push_back(-it.xsparse().dot(beta));
		inds_observed.push_back(i);
		// expected number of events for patient i = cumulative hazard H(t_i)
		hazards.push_back(ComputeHazard(it.xsparse().dot(beta), lifeTime[i], modelIndex));
	}

	// Diagnostic dumps; skipped (statistics still computed) when the files
	// cannot be opened — the old code passed an unchecked fopen result to
	// fprintf and would crash on an unwritable directory.
	FILE* fp = fopen("risks.txt","w");
	if(fp)
	{
		for(int i = 0; i < nPatients; i++)
			fprintf(fp,"%f %d\n",risks[i],censorInd[i]);
		fclose(fp);
	}

	// sorts risks ascending and permutes inds_observed in step
	quickSort(risks,inds_observed,0,nPatients-1);

	// NOTE(review): file-name typo ("Observerd") kept for compatibility with
	// any downstream consumers of this dump — confirm before renaming
	fp = fopen("Observerd_Expected.txt","w");
	if(fp)
	{
		for(int ii = 0; ii < nPatients; ii++)
			fprintf(fp,"%d\t%f\n",censorInd[ii],hazards[ii]);
		fclose(fp);
	}

	for(int ii = 0; ii < (int)nbins.size(); ii++)
	{
		if(nPatients < nbins[ii])
			continue;	// cannot split into more bins than patients

		// Bin sizes: round nPatients/nbins to nearest; the last bin takes the
		// remainder when the division is not exact.
		int nPatientsPerBin = (int)((double)nPatients/(double)nbins[ii] + 0.5);
		int nPatientsLastBin;
		int nbinsActual = nPatients/nPatientsPerBin;
		if(nbinsActual*nPatientsPerBin < nPatients)
		{
			nPatientsLastBin = nPatients - nbinsActual*nPatientsPerBin;
			nbinsActual++;
		}
		else
			nPatientsLastBin = nPatientsPerBin;

		vector<double> observed(nbinsActual,0.0);
		vector<double> expected(nbinsActual,0.0);

		int count = 0;
		for(int i = 0; i < nbinsActual-1; i++)
		{
			for(int j = 0; j < nPatientsPerBin; j++)
			{
				observed[i] += censorInd[inds_observed[count]];
				expected[i] += hazards[inds_observed[count++]];
			}
		}
		for(int j = 0; j < nPatientsLastBin; j++)
		{
			observed[nbinsActual-1] += censorInd[inds_observed[count]];
			expected[nbinsActual-1] += hazards[inds_observed[count++]];
		}

		double xisquaredTemp = 0.0;
		for(int i = 0; i < nbinsActual; i++)
		{
			// plain multiplication instead of pow(x,2)
			if(expected[i] > 0.0)
				xisquaredTemp += (observed[i] - expected[i])*(observed[i] - expected[i])/expected[i];
		}
		xisquared.push_back(xisquaredTemp);
		if(ii == 0)
		{
			cout << endl << "Observed\t" << "Expected\t";
			for(int i = 0; i < nbinsActual; i++)
				cout << endl << observed[i] << "\t\t" << expected[i];
		}
	}
}

void LRModel::ComputeAssignedRiskGroup(MemRowSet testData, unsigned modelIndex, vector<int>& nPatientsPerGroup, vector<int>& nDeadPerGroup, vector<double>& meanSurvivalTime)
{
	vector<double> beta = const_cast<vector<double>&>(m_beta.classparam(0));
	vector<double> lifeTime = testData.getLifeTime();
	vector<int> censorInd = testData.getCensorInd();
	int nPatients = (int)lifeTime.size();

	MemRowSetIterator it = MemRowSetIterator(testData);

	vector<double> risks;
	vector<int> inds_observed;

	int count = 0;
	for(int i = 0; it.next(), i < nPatients; i++)
	{
		risks.push_back(-it.xsparse().dot(beta));
		inds_observed.push_back(i);
	}
	quickSort(risks,inds_observed,0,nPatients-1);

	int nLow = (int)((double)nPatients*0.33);
	int nMedium = (int)((double)nPatients*0.66);

	nPatientsPerGroup.push_back(nLow);
	nPatientsPerGroup.push_back(nMedium-nLow);
	nPatientsPerGroup.push_back(nPatients-nMedium);

	int nDead = 0;
	double survivalTime = 0;
	for(int i = 0; i < nLow; i++)
	{
		if(censorInd[inds_observed[i]] == 1)
		{
			nDead++;
			survivalTime += lifeTime[inds_observed[i]];
		}
	}
	nDeadPerGroup.push_back(nDead);
	meanSurvivalTime.push_back(survivalTime/nDead);

	nDead = 0;
	survivalTime = 0;
	for(int i = nLow+1; i < nMedium; i++)
	{
		if(censorInd[inds_observed[i]] == 1)
		{
			nDead++;
			survivalTime += lifeTime[inds_observed[i]];
		}
	}
	nDeadPerGroup.push_back(nDead);
	meanSurvivalTime.push_back(survivalTime/nDead);

	nDead = 0;
	survivalTime = 0;
	for(int i = nMedium+1; i < nPatients; i++)
	{
		if(censorInd[inds_observed[i]] == 1)
		{
			nDead++;
			survivalTime += lifeTime[inds_observed[i]];
		}
	}
	nDeadPerGroup.push_back(nDead);
	meanSurvivalTime.push_back(survivalTime/nDead);
}

// Returns the cumulative hazard H(t) = -log S(t) for one patient under the
// given parametric survival model, where xbeta is the linear predictor
// x.beta and lifeTime is the patient's (observed or censored) time.
// m_alpha holds the per-thread auxiliary parameter: the shape for
// Weibull/Loglogistic and sigma for Lognormal.
double LRModel::ComputeHazard(double xbeta, double lifeTime, unsigned modelIndex)
{
	// S defaults to 1.0 (zero hazard); the old code left it uninitialized,
	// which was undefined behavior for any unrecognized modelIndex.
	double S = 1.0;
	double lambda, alpha, mu, sigma;
	switch (modelIndex)
	{
	case Exponential:
		lambda = exp(xbeta);
		S = exp(-lifeTime/lambda);
		break;
	case Weibull:
		lambda = exp(xbeta);
		alpha = m_alpha[omp_get_thread_num()];
		S = exp(-pow(lifeTime,alpha)/lambda);
		break;
	case Loglogistic:
		lambda = exp(xbeta);
		alpha = m_alpha[omp_get_thread_num()];
		S = 1.0/(1 + pow(lifeTime,alpha)/lambda);
		break;
	case Lognormal:
		mu = xbeta;
		sigma = m_alpha[omp_get_thread_num()];
		S = 1 - phi((log(lifeTime) - mu)/sigma);
		break;
	default:
		break;	// unknown model: S stays 1.0, hazard 0
	}
	if(S <= 0)
		// NOTE(review): S -> 0 means infinite cumulative hazard, yet 0 is
		// returned here; looks like a placeholder — confirm intended behavior
		return 0.0;
	else
		return -log(S);
}