

#include "slep.h"
#include "hr_time.h"
#include "epph.h"
#include "Logistic.h"

#include <math.h>
#include <stdlib.h>
#include <string.h>

FUNVAL glLogisticRm1(double* A, double* y, double z, OPTS opt);
FUNVAL glLogisticRm2(double* A, double* y, double z, OPTS opt);
FUNVAL glLogisticRm3(double* A, double* y, double z, OPTS opt);

FUNVAL glLogisticR(double* A, double* y, double z, OPTS opt){
	/* Dispatch to the solver variant selected by opt.mFlag / opt.lFlag:
	 *   mFlag=0, lFlag=0        -> Armijo line search          (glLogisticRm1)
	 *   mFlag=1, lFlag=0, q=2   -> Nemirovski + reformulation  (glLogisticRm2)
	 *   mFlag=1, lFlag=1, q=2   -> adaptive line search        (glLogisticRm3)
	 */
	if(opt.mFlag == 0 && opt.lFlag == 0 )
		return glLogisticRm1(A,y,z,opt);
	else if(opt.mFlag == 1 && opt.lFlag == 0 && opt.q == 2)
		return glLogisticRm2(A,y,z,opt);
	else if( opt.mFlag == 1 && opt.lFlag == 1 && opt.q == 2)
		return glLogisticRm3(A,y,z,opt);

	errorOut("Select the appropriate method for glLogisticR, by setting opt.mFlag and opt.lFlag appropriately.");

	/* errorOut() is expected not to return; this fallback avoids falling
	 * off the end of a non-void function (undefined behavior) in case it
	 * ever does. */
	{
		FUNVAL bad;
		memset(&bad, 0, sizeof(bad));
		bad.errorCode = -1;
		return bad;
	}
}

/* Armijo Line Search
 *
 * glLogisticRm1 -- group-lasso regularized logistic regression solved by an
 * accelerated gradient method with an Armijo-type backtracking line search.
 *
 *   A   : m x n data matrix (normalization handled through opt)
 *   y   : m x 1 label vector (+1 / -1)
 *   z   : regularization parameter; interpreted as a fraction of lambda_max
 *         when opt.rFlag != 0.  Must be positive.
 *   opt : solver options.  opt.ind (group boundaries, ind[k] == n) is
 *         required; this variant runs only when opt.mFlag==0 && opt.lFlag==0.
 *
 * Returns a FUNVAL with the solution x, intercept c, per-iteration objective
 * values (funVal) and line-search constants (ValueL); these arrays are
 * heap-allocated and owned by the caller.  errorCode is 0 on success.
 *
 * NOTE(review): the many intermediate work buffers allocated below are never
 * freed (matching the original code); malloc results are also unchecked.
 */
FUNVAL glLogisticRm1(double* A, double* y, double z, OPTS opt){
	int m,n, iterStep, i,k, *ind;
	int bFlag;
	FUNVAL ret;
	double q, q_bar, m1, m2, *weight,  *gWeights, rsL2;
	double *b, *ATb, *norm_ATb, beta, gc, sc;
	double lambda, lambda_max, c, L, cp, ccp, alpha, alphap;
	double *x, *Ax, *xp, *Axp, *xxp, *aa, *As;
	double r_sum, l_sum, *ValueL, *funVal, fun_x, fun_s, fVal, fValp;
	double *norm_x_k, *tmp1, *tmp2;
	double *g, *v, *s, *bb, *weighty, *prob;

	/* Default to an error result so a caller never sees uninitialized
	 * memory if the mFlag/lFlag guard below is not satisfied. */
	memset(&ret, 0, sizeof(ret));
	ret.errorCode = -1;

	m = opt.m;
	n = opt.n;
	rsL2 = opt.rsL2;

	if(z <=  0.0)
		errorOut("glLogisticRm1: z should be positive.");

	// opts = sllopts();
	initNormalization(&opt,A);

	if( !hasGot(&opt,"ind"))
		errorOut("glLogisticRm1: 'ind' missing in opts");
	else{
		ind = opt.ind;
		k = opt.ind_size - 1;	/* k = number of groups */
		if( ind[k] != n)
			errorOut("glLogisticRm1: Check opt.ind " );
	}

	weight = (double *) malloc( sizeof(double) * m);
	b = (double *) malloc( sizeof(double) * m);
	gWeights = (double *) malloc( sizeof(double) * k);
	ATb = (double *) malloc(sizeof(double) * n);
	norm_ATb = (double *) malloc(sizeof(double) * k);
	x = (double * ) malloc( sizeof(double) * n);
	Ax = (double *) malloc( sizeof(double) * m);
	xp = (double * ) malloc( sizeof(double) * n);
	xxp = (double * ) malloc( sizeof(double) * n);
	Axp = (double *) malloc( sizeof(double) * m);
	aa = (double *) malloc( sizeof(double) * m);
	As = (double *) malloc( sizeof(double) * m);
	s = (double *) malloc( sizeof(double) * n);
	g = (double *) malloc( sizeof(double) * n);
	v = (double *) malloc( sizeof(double) * n);
	tmp1 = (double *) malloc( sizeof(double) * k);
	/* FIX: tmp2 receives the k+1 group boundaries ind[0..k] below, so it
	 * needs k+1 elements; allocating only k overflowed the buffer. */
	tmp2 = (double *) malloc( sizeof(double) * (k+1));
	weighty = (double *) malloc(sizeof(double) * m);
	bb = (double *) malloc(sizeof(double) * m);
	prob = (double *) malloc(sizeof(double) * m);
	ValueL = (double *) malloc(sizeof(double) * opt.maxIter);
	funVal = (double *) malloc(sizeof(double) * opt.maxIter);
	norm_x_k = (double *) malloc(sizeof(double) * k);



	//initialize q
	if(!hasGot(&opt,"q"))
		setOPTq(&opt,q=2);
	else{
		q = opt.q;
		if( q<0.99999999)
			errorOut("glLogisticRm1: q should be larger than 1.");
	}

	m1 = normalizeWeights(&opt,weight,y,b);
	m2 = 1.0 - m1;

	if( hasGot(&opt,"gWeights")){
		/* FIX: copy the user-supplied group weights instead of overwriting
		 * the malloc'd pointer (which leaked the buffer).  gWeights is only
		 * read afterwards, so the copy is behavior-identical. */
		dcopy(k,opt.gWeights,1,gWeights,1);
		// check for all positive
	}else
		dcopy(k,oneVector,1,gWeights,1);

	if( opt.rFlag == 0)
		lambda = z;
	else{
		/* z is a fraction; scale it by lambda_max = max_i ||A'b||_{q_bar}
		 * over groups, where q_bar is the dual norm of q. */
		normalizedmv('T',&opt,A,b,ATb);
		if( q == 1)
			q_bar = Inf;
		else if( q > 1.0e6)
			q_bar = 1.0;
		else
			q_bar = q / (q - 1.0);
		for(i = 0; i <k ; i++)
			norm_ATb[i] = dnrm( ind[i+1] - ind[i], &ATb[ind[i]],1,q_bar);

		// incorporate gWeight
		dtbsv('U','N','N',k,0,gWeights,1,norm_ATb,1);
		lambda_max = norm_ATb[idamax(k,norm_ATb,1)-1];
		lambda = z* lambda_max;
	}

	/* Initialize x and the intercept c (log(m1/m2) is the optimum for x=0). */
	if(opt.init == 2){
		memset(x,0,sizeof(double)*n);
		c = log( m1/m2);
	}
	else{
		if(hasGot(&opt,"x0"))
			dcopy(n,opt.x0,1,x,1);
		else
			memset(x,0,sizeof(double)*n);
		if(hasGot(&opt,"c0"))
			c = opt.c0;
		else
			c = log(m1/m2);
	}
	normalizedmv('N',&opt,A,x,Ax);

	if(opt.mFlag == 0 && opt.lFlag==0){
		bFlag = 0;
		L = 1.0l/m; // the initial guess of the Lipschitz continuous gradient
		dsbmv('U',m,0,1.0,weight,1,y,1,0.0,weighty,1);

		// xp=x;Axp=Ax;xxp=zeros(n,1); cp=c;ccp=0;
		dcopy(n,x,1,xp,1);
		dcopy(m,Ax,1,Axp,1);
		memset(xxp,0,sizeof(double)*n);
		cp = c;
		ccp = 0;

		alphap = 0;
		alpha = 1;
		for(iterStep=0; iterStep < opt.maxIter; iterStep++){
			/* Nesterov extrapolation: search point s from x and x-previous. */
			beta = (alphap - 1)/alpha;
			// s = x + beta * xxp;
			dcopy(n,x,1,s,1);
			daxpy(n,beta,xxp,1,s,1);
			// sc = c + beta * ccp;
			sc = c + beta * ccp;
			// As = Ax + beta * (Ax - Axp);
			dcopy(m, Ax,1, As,1);
			dscal(m,1 + beta,As,1);
			daxpy(m, -beta, Axp,1,As,1);

			//aa = -diag(y) * (As+sc);
			dcopy(m,As,1,aa,1);
			daxpy(m,sc,oneVector,1,aa,1);
			dtbmv('U','N','N',m,0,y,1,aa,1);
			dscal(m,-1.0,aa,1);

			// fun_s= weight' * ( log( exp(-bb) +  exp(aa-bb) ) + bb )+...
			//    rsL2/2 * s'*s;
			/* bb = max(aa,0) keeps the log-sum-exp numerically stable. */
			fun_s = 0;
			for(i=0; i<m;i++){
				bb[i] = maxof2(aa[i],0.0);
				fun_s += weight[i] * ( log(exp(-bb[i])+exp(aa[i]-bb[i])) + bb[i]);
			}
			fun_s += rsL2 * ddot(n,s,1,s,1)/2.0;

			//b = -weighty .* (1-prob);
			for(i=0;i<m;i++){
				prob[i] = 1.0l/(1.0l+exp(aa[i]));
				b[i] = - weighty[i] * (1.0l - prob[i]);
			}


			// gc =  sum(b); the gradient of c
			gc = ddot(m,b,1,oneVector,1);

			// g = ATb
			normalizedmv('T',&opt,A,b,g);

			// xp = x; Axp = Ax; cp =c;
			dcopy(n,x,1,xp,1);
			dcopy(m,Ax,1,Axp,1);
			cp = c;

			/* Backtracking: increase L until the quadratic model majorizes
			 * the loss at the new point. */
			for(;;){
				// v= s-g/L; c = sc- gc/L;
				dcopy(n,s,1,v,1);
				daxpy(n,-1/L,g,1,v,1);
				c = sc - gc/L;

				/* Proximal step: project v onto the group q-norm ball with
				 * per-group thresholds lambda/L * gWeights. */
				dcopy(k,gWeights,1,tmp1,1);
				dscal(k,lambda/L,tmp1,1);
				for(i=0;i<=k;i++)
					tmp2[i] = ind[i];
				if(q < 1.0e6)
					eppVector(x,v,tmp2,k,n,tmp1,q);
				else
					eppVector(x,v,tmp2,k,n,tmp1, 1.0e6);

				// v = x -s ;
				dcopy(n,x,1,v,1);
				daxpy(n,-1,s,1,v,1);

				normalizedmv('N',&opt,A,x,Ax);

				// aa = -y .* (Ax+c);
				dcopy(m,Ax,1,aa,1);
				daxpy(m,c,oneVector,1,aa,1);
				dtbmv('U','N','N',m,0,y,1,aa,1);
				dscal(m,-1.0,aa,1);

				// bb= max(aa,0);
				// fun_x= weight' * ( log( exp(-bb) +  exp(aa-bb) ) + bb )+...
				//    rsL2/2 * x'*x;
				fun_x = 0;
				for(i=0; i<m;i++){
					bb[i] = maxof2(aa[i],0.0);
					fun_x += weight[i] * ( log(exp(-bb[i])+exp(aa[i]-bb[i])) + bb[i]);
				}
				fun_x += rsL2 * ddot(n,x,1,x,1)/2.0;

				r_sum = (ddot(n,v,1,v,1) + (c-sc)*(c-sc))/2;
				l_sum = fun_x - fun_s - ddot(n,v,1,g,1) - (c-sc)*gc;

				/* The gradient step makes little improvement: optimal. */
				if( r_sum <= 1.0e-20){
					bFlag = 1;
					break;
				}

				if( l_sum <= r_sum * L)
					break;
				else
					L = maxof2(2*L, l_sum/r_sum);
			}
			alphap = alpha;
			alpha = (1 + sqrt(4* alpha * alpha + 1))/2;
			ValueL[iterStep] = L;

			// xxp = x- xp; ccp = c-cp;
			dcopy(n,x,1,xxp,1);
			daxpy(n,-1,xp,1,xxp,1);
			ccp = c - cp;

			for(i=0;i<k; i++)
				norm_x_k[i] = dnrm(ind[i+1]-ind[i],&x[ind[i]],1,q);

			/* Objective = smooth loss + weighted group-norm penalty. */
			fVal = fValp = funVal[iterStep] = fun_x + lambda * ddot(k,norm_x_k,1,gWeights,1);
			if(iterStep !=0)
				fValp = funVal[iterStep -1];

			if(bFlag)
				break;

			if(terminationCondition(&opt,fVal,fValp,x,dnrm2(n,xxp,1),dnrm2(n,xp,1),iterStep))
				break;
		}
		ret.errorCode = 0;
		ret.c =c;
		ret.funVal = funVal;
		ret.totIter = iterStep;
		ret.ValueL = ValueL;
		ret.x = x;
	}
	return ret;
}

/* Nemirovski's line search + reformulated problem
 *
 * glLogisticRm2 -- same objective as glLogisticRm1 (with q fixed to 2), but
 * solved on the reformulated problem in which each group's norm is carried
 * as an explicit auxiliary variable t[i] >= ||x_group_i||_2, using
 * Nemirovski's line search.
 *
 * Parameters and result as in glLogisticRm1; requires opt.mFlag==1,
 * opt.lFlag==0 and opt.q==2.  The returned x, funVal and ValueL arrays are
 * heap-allocated and owned by the caller.
 *
 * NOTE(review): work buffers are never freed and mallocs are unchecked,
 * matching the original code.
 */
FUNVAL glLogisticRm2(double* A, double* y, double z, OPTS opt){
	int m,n, iterStep, i,k, *ind;
	int bFlag;
	FUNVAL ret;
	double q, q_bar, m1, m2, *weight,  *gWeights;
	double *b, *ATb, *norm_ATb, beta, gc, sc;
	double lambda, lambda_max, c, L, cp, ccp, alpha, alphap;
	double *x, *Ax, *xp, *Axp, *xxp, *aa, *As;
	double r_sum, l_sum, *ValueL, *funVal, fun_x, fun_s, fVal, fValp;
	double *tmp1, *tmp2;
	double *g, *v, *s, *bb, *weighty, *prob;
	double *t, *tp, *s_t, *u, *v_t;
	double norm_xp, norm_xxp;

	/* Default to an error result so a caller never sees uninitialized
	 * memory if the mFlag/lFlag/q guard below is not satisfied. */
	memset(&ret, 0, sizeof(ret));
	ret.errorCode = -1;

	m = opt.m;
	n = opt.n;

	if(z <=  0.0)
		errorOut("glLogisticRm2: z should be positive.");

	// opts = sllopts();
	initNormalization(&opt,A);

	if( !hasGot(&opt,"ind"))
		errorOut("glLogisticRm2: 'ind' missing in opts");
	else{
		ind = opt.ind;
		k = opt.ind_size - 1;	/* k = number of groups */
		if( ind[k] != n)
			errorOut("glLogisticRm2: Check opt.ind " );
	}

	weight = (double *) malloc( sizeof(double) * m);
	b = (double *) malloc( sizeof(double) * m);
	gWeights = (double *) malloc( sizeof(double) * k);
	ATb = (double *) malloc(sizeof(double) * n);
	norm_ATb = (double *) malloc(sizeof(double) * k);
	x = (double * ) malloc( sizeof(double) * n);
	Ax = (double *) malloc( sizeof(double) * m);
	xp = (double * ) malloc( sizeof(double) * n);
	xxp = (double * ) malloc( sizeof(double) * n);
	Axp = (double *) malloc( sizeof(double) * m);
	aa = (double *) malloc( sizeof(double) * m);
	As = (double *) malloc( sizeof(double) * m);
	s = (double *) malloc( sizeof(double) * n);
	g = (double *) malloc( sizeof(double) * n);
	v = (double *) malloc( sizeof(double) * n);
	tmp1 = (double *) malloc( sizeof(double) * k);
	/* FIX: tmp2 receives the k+1 group boundaries ind[0..k] below, so it
	 * needs k+1 elements; allocating only k overflowed the buffer. */
	tmp2 = (double *) malloc( sizeof(double) * (k+1));
	weighty = (double *) malloc(sizeof(double) * m);
	bb = (double *) malloc(sizeof(double) * m);
	prob = (double *) malloc(sizeof(double) * m);
	ValueL = (double *) malloc(sizeof(double) * opt.maxIter);
	funVal = (double *) malloc(sizeof(double) * opt.maxIter);
	t = (double * ) malloc( sizeof(double) * k);
	tp = (double *) malloc( sizeof(double) * k);
	s_t = (double *) malloc( sizeof(double) * k);
	v_t = (double *) malloc( sizeof(double) * k);
	u = (double *) malloc( sizeof(double) * n);

	//initialize q
	if(!hasGot(&opt,"q"))
		setOPTq(&opt,(q=2));
	else{
		q = opt.q;
		if( q<0.99999999)
			errorOut("glLogisticRm2: q should be larger than 1.");
	}

	m1 = normalizeWeights(&opt,weight,y,b);
	m2 = 1.0 - m1;

	if( hasGot(&opt,"gWeights")){
		/* FIX: copy the user-supplied group weights instead of overwriting
		 * the malloc'd pointer (which leaked the buffer).  gWeights is only
		 * read afterwards, so the copy is behavior-identical. */
		dcopy(k,opt.gWeights,1,gWeights,1);
		// check for all positive
	}else
		dcopy(k,oneVector,1,gWeights,1);

	if( opt.rFlag == 0)
		lambda = z;
	else{
		/* z is a fraction; scale by lambda_max computed from the dual norm. */
		normalizedmv('T',&opt,A,b,ATb);
		if( q == 1)
			q_bar = Inf;
		else if( q > 1.0e6)
			q_bar = 1.0;
		else
			q_bar = q / (q - 1.0);
		for(i = 0; i <k ; i++)
			norm_ATb[i] = dnrm( ind[i+1] - ind[i], &ATb[ind[i]],1,q_bar);

		// incorporate gWeight
		dtbsv('U','N','N',k,0,gWeights,1,norm_ATb,1);
		lambda_max = norm_ATb[idamax(k,norm_ATb,1)-1];
		lambda = z* lambda_max;
	}

	/* Initialize x and the intercept c (log(m1/m2) is the optimum for x=0). */
	if(opt.init == 2){
		memset(x,0,sizeof(double)*n);
		c = log( m1/m2);
	}
	else{
		if(hasGot(&opt,"x0"))
			dcopy(n,opt.x0,1,x,1);
		else
			memset(x,0,sizeof(double)*n);
		if(hasGot(&opt,"c0"))
			c = opt.c0;
		else
			c = log(m1/m2);
	}
	normalizedmv('N',&opt,A,x,Ax);

	if(opt.mFlag == 1 && opt.lFlag==0 && opt.q==2){
		bFlag = 0;
		L = 1.0l/m; // the initial guess of the Lipschitz continuous gradient
		dsbmv('U',m,0,1.0,weight,1,y,1,0.0,weighty,1);

		// xp=x;Axp=Ax;xxp=zeros(n,1); cp=c;ccp=0;
		dcopy(n,x,1,xp,1);
		dcopy(m,Ax,1,Axp,1);
		memset(xxp,0,sizeof(double)*n);
		cp = c;
		ccp = 0;

		/* t[i] starts as the 2-norm of group i of the initial x. */
		for(i=0;i<k;i++)
			t[i]=dnrm2(ind[i+1]-ind[i],&x[ind[i]],1);
		//tp=t;
		dcopy(k,t,1,tp,1);

		alphap = 0;
		alpha = 1;
		for(iterStep=0; iterStep < opt.maxIter; iterStep++){
			/* Nesterov extrapolation on (x, c, t). */
			beta = (alphap - 1)/alpha;
			// s = x + beta * xxp;
			dcopy(n,x,1,s,1);
			daxpy(n,beta,xxp,1,s,1);
			// sc = c + beta * ccp;
			sc = c + beta * ccp;

			//s_t = t + beta * (t-tp);
			dcopy(k,t,1,s_t,1);
			dscal(k,1+beta,s_t,1);
			daxpy(k,-beta,tp,1,s_t,1);

			// As = Ax + beta * (Ax - Axp);
			dcopy(m, Ax,1, As,1);
			dscal(m,1 + beta,As,1);
			daxpy(m, -beta, Axp,1,As,1);

			//aa = -diag(y) * (As+sc);
			dcopy(m,As,1,aa,1);
			daxpy(m,sc,oneVector,1,aa,1);
			dtbmv('U','N','N',m,0,y,1,aa,1);
			dscal(m,-1.0,aa,1);

			// fun_s= weight' * ( log( exp(-bb) +  exp(aa-bb) ) + bb )
			/* bb = max(aa,0) keeps the log-sum-exp numerically stable. */
			fun_s = 0;
			for(i=0; i<m;i++){
				bb[i] = maxof2(aa[i],0.0);
				fun_s += weight[i] * ( log(exp(-bb[i])+exp(aa[i]-bb[i])) + bb[i]);
			}

			//b = -weighty .* (1-prob);
			for(i=0;i<m;i++){
				prob[i] = 1.0l/(1.0l+exp(aa[i]));
				b[i] = - weighty[i] * (1.0l - prob[i]);
			}


			// gc =  sum(b); the gradient of c
			gc = ddot(m,b,1,oneVector,1);

			// g = ATb
			normalizedmv('T',&opt,A,b,g);

			// xp = x; Axp = Ax; cp =c;
			dcopy(n,x,1,xp,1);
			dcopy(m,Ax,1,Axp,1);
			cp = c;
			//tp=t
			dcopy(k,t,1,tp,1);

			/* Line search: grow L until the quadratic model majorizes. */
			for(;;){
				// u= s-g/L; c = sc- gc/L;
				dcopy(n,s,1,u,1);
				daxpy(n,-1/L,g,1,u,1);
				c = sc - gc/L;

				//v = s_t - lambda/L;
				dcopy(k,s_t,1,v,1);
				daxpy(k,-lambda/L,oneVector,1,v,1);

				/* Euclidean projection for the reformulated problem. */
				for(i=0;i<=k;i++)
					tmp2[i] = ind[i];
				eppVectorR(x,t,u,v,tmp2,n,k);

				// v = x -s ;
				dcopy(n,x,1,v,1);
				daxpy(n,-1,s,1,v,1);

				//v_t = t - s_t
				dcopy(k,t,1,v_t,1);
				daxpy(k,-1,s_t,1,v_t,1);


				normalizedmv('N',&opt,A,x,Ax);

				// aa = -y .* (Ax+c);
				dcopy(m,Ax,1,aa,1);
				daxpy(m,c,oneVector,1,aa,1);
				dtbmv('U','N','N',m,0,y,1,aa,1);
				dscal(m,-1.0,aa,1);

				// bb= max(aa,0);
				// fun_x= weight' * ( log( exp(-bb) +  exp(aa-bb) ) + bb )
				fun_x = 0;
				for(i=0; i<m;i++){
					bb[i] = maxof2(aa[i],0.0);
					fun_x += weight[i] * ( log(exp(-bb[i])+exp(aa[i]-bb[i])) + bb[i]);
				}

				r_sum = (ddot(n,v,1,v,1) + (c-sc)*(c-sc) + ddot(k,v_t,1,v_t,1))/2;
				l_sum = fun_x - fun_s - ddot(n,v,1,g,1) - (c-sc)*gc;

				/* The gradient step makes little improvement: optimal. */
				if( r_sum <= 1.0e-20){
					bFlag = 1;
					break;
				}

				if( l_sum <= r_sum * L)
					break;
				else
					L = maxof2(2*L, l_sum/r_sum);
			}
			alphap = alpha;
			alpha = (1 + sqrt(4* alpha * alpha + 1))/2;
			ValueL[iterStep] = L;

			// xxp = x- xp; ccp = c-cp;
			dcopy(n,x,1,xxp,1);
			daxpy(n,-1,xp,1,xxp,1);
			ccp = c - cp;

			/* Objective = smooth loss + weighted sum of the group norms t. */
			fVal = fValp = funVal[iterStep] = fun_x + lambda *ddot(k,t,1,gWeights,1);
			if(iterStep !=0)
				fValp = funVal[iterStep -1];

			if(bFlag)
				break;

			/* Norms over the augmented variable (x, c, t). */
			norm_xxp = sqrt(ddot(n,xxp,1,xxp,1) + (c-cp)*(c-cp) + ddot(k,t,1,t,1)
				+ddot(k,tp,1,tp,1) - 2*ddot(k,t,1,tp,1));
			norm_xp = sqrt(ddot(n,xp,1,xp,1) + ddot(k,tp,1,tp,1) + cp*cp);
			if(terminationCondition(&opt,fVal,fValp,x,norm_xxp,norm_xp,iterStep))
				break;
		}
		ret.errorCode = 0;
		ret.c =c;
		ret.funVal = funVal;
		ret.totIter = iterStep;
		ret.ValueL = ValueL;
		ret.x = x;
	}
	return ret;
}

/* Adaptive line search
 *
 * glLogisticRm3 -- same reformulated problem as glLogisticRm2 (q fixed to 2,
 * auxiliary group-norm variables t), solved with an adaptive line search
 * that may also *decrease* L between iterations (via tao).
 *
 * Parameters and result as in glLogisticRm1; requires opt.mFlag==1,
 * opt.lFlag==1 and opt.q==2.  The returned x, funVal and ValueL arrays are
 * heap-allocated and owned by the caller.
 *
 * NOTE(review): work buffers are never freed and mallocs are unchecked,
 * matching the original code.
 */
FUNVAL glLogisticRm3(double* A, double* y, double z, OPTS opt){

	int m,n, iterStep, i,k, *ind;
	int bFlag;
	FUNVAL ret;
	double q, q_bar, m1, m2, *weight,  *gWeights;
	double *b, *ATb, *norm_ATb, beta, gc, sc;
	double lambda, lambda_max, c, L, cp, ccp, alpha, alphap, gamma;
	double *x, *Ax, *xp, *Axp, *xxp, *aa, *As;
	double r_sum, l_sum, *ValueL, *funVal, fun_x, fun_s, fVal, fValp;
	double *tmp1, *tmp2;
	double *g, *v, *s, *bb, *weighty, *prob;
	double *t, *tp, *s_t, *u, *v_t;
	double norm_xp, norm_xxp;
	double *xnew, *tnew, cnew, *Axnew, tao;

	/* Default to an error result so a caller never sees uninitialized
	 * memory if the mFlag/lFlag/q guard below is not satisfied. */
	memset(&ret, 0, sizeof(ret));
	ret.errorCode = -1;

	m = opt.m;
	n = opt.n;

	if(z <=  0.0)
		errorOut("glLogisticRm3: z should be positive.");

	// opts = sllopts();
	initNormalization(&opt,A);

	if( !hasGot(&opt,"ind"))
		errorOut("glLogisticRm3: 'ind' missing in opts");
	else{
		ind = opt.ind;
		k = opt.ind_size - 1;	/* k = number of groups */
		if( ind[k] != n)
			errorOut("glLogisticRm3: Check opt.ind " );
	}

	weight = (double *) malloc( sizeof(double) * m);
	b = (double *) malloc( sizeof(double) * m);
	gWeights = (double *) malloc( sizeof(double) * k);
	ATb = (double *) malloc(sizeof(double) * n);
	norm_ATb = (double *) malloc(sizeof(double) * k);
	x = (double * ) malloc( sizeof(double) * n);
	Ax = (double *) malloc( sizeof(double) * m);
	xp = (double * ) malloc( sizeof(double) * n);
	xxp = (double * ) malloc( sizeof(double) * n);
	Axp = (double *) malloc( sizeof(double) * m);
	aa = (double *) malloc( sizeof(double) * m);
	As = (double *) malloc( sizeof(double) * m);
	s = (double *) malloc( sizeof(double) * n);
	g = (double *) malloc( sizeof(double) * n);
	v = (double *) malloc( sizeof(double) * n);
	tmp1 = (double *) malloc( sizeof(double) * k);
	/* FIX: tmp2 receives the k+1 group boundaries ind[0..k] below, so it
	 * needs k+1 elements; allocating only k overflowed the buffer. */
	tmp2 = (double *) malloc( sizeof(double) * (k+1));
	weighty = (double *) malloc(sizeof(double) * m);
	bb = (double *) malloc(sizeof(double) * m);
	prob = (double *) malloc(sizeof(double) * m);
	ValueL = (double *) malloc(sizeof(double) * opt.maxIter);
	funVal = (double *) malloc(sizeof(double) * opt.maxIter);
	t = (double * ) malloc( sizeof(double) * k);
	tp = (double *) malloc( sizeof(double) * k);
	s_t = (double *) malloc( sizeof(double) * k);
	v_t = (double *) malloc( sizeof(double) * k);
	u = (double *) malloc( sizeof(double) * n);
	tnew = (double *) malloc( sizeof(double) * k);
	xnew = (double *) malloc( sizeof(double) * n);
	Axnew = (double *) malloc( sizeof(double) * m);

	//initialize q
	if(!hasGot(&opt,"q"))
		setOPTq(&opt,(q=2));
	else{
		q = opt.q;
		if( q<0.99999999)
			errorOut("glLogisticRm3: q should be larger than 1.");
	}

	m1 = normalizeWeights(&opt,weight,y,b);
	m2 = 1.0 - m1;

	if( hasGot(&opt,"gWeights")){
		/* FIX: copy the user-supplied group weights instead of overwriting
		 * the malloc'd pointer (which leaked the buffer).  gWeights is only
		 * read afterwards, so the copy is behavior-identical. */
		dcopy(k,opt.gWeights,1,gWeights,1);
		// check for all positive
	}else
		dcopy(k,oneVector,1,gWeights,1);

	if( opt.rFlag == 0)
		lambda = z;
	else{
		/* z is a fraction; scale by lambda_max computed from the dual norm. */
		normalizedmv('T',&opt,A,b,ATb);
		if( q == 1)
			q_bar = Inf;
		else if( q > 1.0e6)
			q_bar = 1.0;
		else
			q_bar = q / (q - 1.0);
		for(i = 0; i <k ; i++)
			norm_ATb[i] = dnrm( ind[i+1] - ind[i], &ATb[ind[i]],1,q_bar);

		// incorporate gWeight
		dtbsv('U','N','N',k,0,gWeights,1,norm_ATb,1);
		lambda_max = norm_ATb[idamax(k,norm_ATb,1)-1];
		lambda = z* lambda_max;
	}

	/* Initialize x and the intercept c (log(m1/m2) is the optimum for x=0). */
	if(opt.init == 2){
		memset(x,0,sizeof(double)*n);
		c = log( m1/m2);
	}
	else{
		if(hasGot(&opt,"x0"))
			dcopy(n,opt.x0,1,x,1);
		else
			memset(x,0,sizeof(double)*n);
		if(hasGot(&opt,"c0"))
			c = opt.c0;
		else
			c = log(m1/m2);
	}
	normalizedmv('N',&opt,A,x,Ax);

	if(opt.mFlag == 1 && opt.lFlag==1 && opt.q==2){
		bFlag = 0;
		L = 1.0l/m; // the initial guess of the Lipschitz continuous gradient
		dsbmv('U',m,0,1.0,weight,1,y,1,0.0,weighty,1);

		gamma = 1;

		// xp=x;Axp=Ax;xxp=zeros(n,1); cp=c;ccp=0;
		dcopy(n,x,1,xp,1);
		dcopy(m,Ax,1,Axp,1);
		memset(xxp,0,sizeof(double)*n);
		cp = c;
		ccp = 0;

		/* t[i] starts as the 2-norm of group i of the initial x. */
		for(i=0;i<k;i++)
			t[i]=dnrm2(ind[i+1]-ind[i],&x[ind[i]],1);
		//tp=t
		dcopy(k,t,1,tp,1);

		for(iterStep=0; iterStep < opt.maxIter; iterStep++){
			
			for(;;){
				if(iterStep!=0){
					/* alpha is the positive root of L*alpha^2 = gamma*(1-alpha),
					 * i.e. alpha = (-gamma + sqrt(gamma^2 + 4*L*gamma)) / (2L).
					 * FIX: the whole numerator must be divided by 2L; the
					 * original code divided only the sqrt term, yielding a
					 * (typically negative) wrong step size. */
					alpha = (-gamma + sqrt( gamma*gamma + 4*L*gamma))/(2*L);
					beta = ( gamma - gamma * alphap)/(alphap *gamma + alphap*L*alpha);

					// s = x + beta * xxp;
					dcopy(n,x,1,s,1);
					daxpy(n,beta,xxp,1,s,1);
					// sc = c + beta * ccp;
					sc = c + beta * ccp;

					//s_t = t + beta * (t-tp);
					dcopy(k,t,1,s_t,1);
					dscal(k,1+beta,s_t,1);
					daxpy(k,-beta,tp,1,s_t,1);

					// As = Ax + beta * (Ax - Axp);
					dcopy(m, Ax,1, As,1);
					dscal(m,1 + beta,As,1);
					daxpy(m, -beta, Axp,1,As,1);
				}
				else{
					alpha = (-1 + sqrt(5.0l))/2.0;
					beta = 0;
					//s =x ; s_t= t;sc =c; As = Ax;
					dcopy(n,x,1,s,1);
					dcopy(k,t,1,s_t,1);
					sc = c;
					dcopy(m,Ax,1,As,1);
				}

				//aa = -diag(y) * (As+sc);
				dcopy(m,As,1,aa,1);
				daxpy(m,sc,oneVector,1,aa,1);
				dtbmv('U','N','N',m,0,y,1,aa,1);
				dscal(m,-1.0,aa,1);

				// fun_s= weight' * ( log( exp(-bb) +  exp(aa-bb) ) + bb )
				/* bb = max(aa,0) keeps the log-sum-exp numerically stable. */
				fun_s = 0;
				for(i=0; i<m;i++){
					bb[i] = maxof2(aa[i],0.0);
					fun_s += weight[i] * ( log(exp(-bb[i])+exp(aa[i]-bb[i])) + bb[i]);
				}

				//b = -weighty .* (1-prob);
				for(i=0;i<m;i++){
					prob[i] = 1.0l/(1.0l+exp(aa[i]));
					b[i] = - weighty[i] * (1.0l - prob[i]);
				}


				// gc =  sum(b); the gradient of c
				gc = ddot(m,b,1,oneVector,1);

				// g = ATb
				normalizedmv('T',&opt,A,b,g);

				// u= s-g/L; cnew = sc- gc/L;
				dcopy(n,s,1,u,1);
				daxpy(n,-1/L,g,1,u,1);
				cnew = sc - gc/L;

				//v = s_t - lambda/L;
				dcopy(k,s_t,1,v,1);
				daxpy(k,-lambda/L,oneVector,1,v,1);

				/* Euclidean projection for the reformulated problem. */
				for(i=0;i<=k;i++)
					tmp2[i] = ind[i];
				eppVectorR(xnew,tnew,u,v,tmp2,n,k);

				// v = xnew -s ;
				dcopy(n,xnew,1,v,1);
				daxpy(n,-1,s,1,v,1);

				//v_t = tnew - s_t
				dcopy(k,tnew,1,v_t,1);
				daxpy(k,-1,s_t,1,v_t,1);


				normalizedmv('N',&opt,A,xnew,Axnew);

				// aa = -y .* (Axnew+cnew);
				dcopy(m,Axnew,1,aa,1);
				daxpy(m,cnew,oneVector,1,aa,1);
				dtbmv('U','N','N',m,0,y,1,aa,1);
				dscal(m,-1.0,aa,1);

				// bb= max(aa,0);
				// fun_x= weight' * ( log( exp(-bb) +  exp(aa-bb) ) + bb )
				fun_x = 0;
				for(i=0; i<m;i++){
					bb[i] = maxof2(aa[i],0.0);
					fun_x += weight[i] * ( log(exp(-bb[i])+exp(aa[i]-bb[i])) + bb[i]);
				}

				r_sum = (ddot(n,v,1,v,1) + (cnew-sc)*(cnew-sc) + ddot(k,v_t,1,v_t,1))/2;
				l_sum = fun_x - fun_s - ddot(n,v,1,g,1) - (cnew-sc)*gc;

				/* The gradient step makes little improvement: optimal. */
				if( r_sum <= 1.0e-20){
					bFlag = 1;
					break;
				}

				if( l_sum <= r_sum * L)
					break;
				else
					L = maxof2(2*L, l_sum/r_sum);
			}
			gamma = L*alpha*alpha;
			alphap = alpha;
			ValueL[iterStep] = L;

			/* Adaptive part: if the model is much larger than the actual
			 * decrease (tao >= 5), shrink L for the next iteration.
			 * NOTE(review): if bFlag was set above, l_sum may be ~0 and tao
			 * ill-defined; the loop breaks before L is used again. */
			tao = L * r_sum / l_sum;
			if(tao>=5)
				L =L*0.8;

			//xp = x; x=xnew; xxp = x-xp;
			//Axp = Ax; Ax = Axnew ; tp = t; t =tnew; cp=c; c=cnew; ccp =c-cp;
			dcopy(n,x,1,xp,1);
			dcopy(n,xnew,1,x,1);
			dcopy(n,x,1,xxp,1);
			daxpy(n,-1,xp,1,xxp,1);
			dcopy(m,Ax,1,Axp,1);
			dcopy(m,Axnew,1,Ax,1);
			dcopy(k,t,1,tp,1);
			dcopy(k,tnew,1,t,1);

			cp = c;
			c = cnew;
			ccp = c - cp;

			/* Objective = smooth loss + weighted sum of the group norms t. */
			fVal = fValp = funVal[iterStep] = fun_x + lambda *ddot(k,t,1,gWeights,1);
			if(iterStep !=0)
				fValp = funVal[iterStep -1];

			if(bFlag)
				break;

			/* Norms over the augmented variable (x, c, t). */
			norm_xxp = sqrt(ddot(n,xxp,1,xxp,1) + (c-cp)*(c-cp) + ddot(k,t,1,t,1)
				+ddot(k,tp,1,tp,1) - 2*ddot(k,t,1,tp,1));
			norm_xp = sqrt(ddot(n,xp,1,xp,1) + ddot(k,tp,1,tp,1) + cp*cp);
			if(terminationCondition(&opt,fVal,fValp,x,norm_xxp,norm_xp,iterStep))
				break;
		}
		ret.errorCode = 0;
		ret.c =c;
		ret.funVal = funVal;
		ret.totIter = iterStep;
		ret.ValueL = ValueL;
		ret.x = x;
	}
	return ret;
}


