

#include "slep.h"
#include "hr_time.h"
#include "epph.h"

FUNVAL glLeastRm1(double* A, double* y, double z, OPTS opt);
FUNVAL glLeastRm2(double* A, double* y, double z, OPTS opt);
FUNVAL glLeastRm3(double* A, double* y, double z, OPTS opt);

/*
 * glLeastR - front-end for the group-lasso least squares solvers.
 * Dispatches on opt.mFlag / opt.lFlag / opt.q:
 *   mFlag==0, lFlag==0         -> Armijo line search      (glLeastRm1)
 *   mFlag==1, lFlag==0, q==2   -> Nemirovski line search  (glLeastRm2)
 *   mFlag==1, lFlag==1, q==2   -> adaptive line search    (glLeastRm3)
 * Any other combination is reported via errorOut.
 */
FUNVAL glLeastR(double* A, double* y, double z, OPTS opt){
		if(opt.mFlag == 0 && opt.lFlag == 0 )
			return glLeastRm1(A,y,z,opt);
		else if(opt.mFlag == 1 && opt.lFlag == 0 && opt.q == 2)
			return glLeastRm2(A,y,z,opt);
		else if( opt.mFlag == 1 && opt.lFlag == 1 && opt.q == 2)
			return glLeastRm3(A,y,z,opt);
		else{
			errorOut("Select the appropriate method for glLeastR, by setting opt.mFlag and opt.lFlag appropriately.");
			/* errorOut is expected not to return; the fallback below keeps
			   the function well-defined if it ever does (falling off the end
			   of a non-void function whose value is used is UB). */
			{
				FUNVAL err = {0};
				err.errorCode = -1;
				return err;
			}
		}
}

/* Armijo Line Search */
/*
 * glLeastRm1 - group-lasso regularized least squares,
 *
 *     min_x  (1/2)||A x - y||_2^2 + lambda * sum_i gWeights[i]*||x_Gi||_q,
 *
 * solved with an accelerated gradient method plus an Armijo-type line
 * search on the Lipschitz estimate L (the opt.mFlag==0 && opt.lFlag==0
 * variant selected by glLeastR).
 *
 *   A   - m-by-n data matrix (accessed only through normalizedmv)
 *   y   - length-m response vector
 *   z   - regularization value: used directly when opt.rFlag==0,
 *         otherwise interpreted as a fraction in (0,1) of lambda_max
 *   opt - solver options; opt.ind (k+1 group boundaries, ind[k]==n)
 *         is mandatory
 *
 * Returns a FUNVAL carrying the solution x, the per-iteration objective
 * values funVal, the line-search constants ValueL and the iteration count.
 *
 * NOTE(review): the intermediate work buffers allocated below are never
 * freed before returning (kept as in the original code); only ret.x,
 * ret.funVal and ret.ValueL escape to the caller.
 */
FUNVAL glLeastRm1(double* A, double* y, double z, OPTS opt){
	FUNVAL ret;
	int m,n, iterStep, k,i, bFlag;
	int * ind;
	double q,q_bar;
	double* gWeights;
	double *ATy, *x, *Ax, *Axy, *xxp, *xp, *ATAs, *Axp, *As, *g, *s, *norm_ATy, *v, *x_norm, *Av, *tmp1, *tmp2;
	double *funVal,*ValueL;
	double lambda, alpha, alphap, L, l_sum, r_sum,fVal,fValp, lambda_max, beta;

	m = opt.m;
	n = opt.n;
	iterStep = 0;	/* keep ret.totIter defined even if the main loop never runs */
	initNormalization(&opt,A);

	/* Initialize 'ind' */
	if(!hasGot(&opt,"ind"))
		errorOut("Please set the OPT.ind field before executing again.");
	else{
		ind = opt.ind;
		k = opt.ind_size - 1;
		if(ind[k] != (n))
			errorOut("Check opt.ind, the last value should be n");
	}

	gWeights = (double * ) malloc( sizeof(double) * k);
	ATy = (double * ) malloc( sizeof(double) * n);
	norm_ATy = (double * ) malloc( sizeof(double) * k);
	x = (double * ) malloc( sizeof(double) * n);
	Ax = (double * ) malloc( sizeof(double) * m);
	xp = (double * ) malloc( sizeof(double) * n);
	Axp = (double * ) malloc( sizeof(double) * m);
	xxp = (double * ) malloc( sizeof(double) * n);
	s = (double * ) malloc( sizeof(double) * n);
	As = (double * ) malloc( sizeof(double) * m);
	ATAs = (double * ) malloc( sizeof(double) * n);
	g = (double * ) malloc( sizeof(double) * n);
	v = (double * ) malloc( sizeof(double) * n);
	tmp1 = (double * ) malloc( sizeof(double) * maxof2(m,n));
	/* tmp2 holds the k+1 group boundaries as doubles; the former size
	   maxof2(m,n) is one element short when every group has size one
	   (k==n and m<=n), which overflowed the heap block */
	tmp2 = (double * ) malloc( sizeof(double) * (k+1));
	Av = (double * ) malloc( sizeof(double) * m);
	Axy = (double * ) malloc( sizeof(double) * m);
	ValueL = (double * ) malloc( sizeof(double) * opt.maxIter);
	x_norm = (double * ) malloc( sizeof(double) * k);
	funVal = (double * ) malloc( sizeof(double) * opt.maxIter);

	/* Initialize 'q' */
	if( !hasGot(&opt,"q"))
		opt.q = q = 2;
	else
		if((q = opt.q)<1)
			errorOut("q should be larger than 1");

	/* Initialize gWeights - The weights for each group */
	if(hasGot(&opt,"gWeights")){
		/* copy (not alias) so the caller's array stays untouched and the
		   malloc above is not leaked */
		dcopy(k,opt.gWeights,1,gWeights,1);
		// TO-DO have to assert all gWeights are positive else errorOut
	}
	else
		dcopy(k,oneVector,1,gWeights,1);

	/********* Starting point initialization *******/
	/* Compute ATy */
	normalizedmv('T',&opt,A,y,ATy);

	/* Process the regularization parameter */
	if( opt.rFlag == 0)
		lambda = z;
	else{
		if( z<0.0  || z>1.0)
			errorOut("z should be in range (0,1)");
		/* q_bar is the dual norm exponent of q */
		if( q == 1)
			q_bar = Inf;
		else if( q > 1.0e6)
			q_bar = 1.0;
		else
			q_bar = q / (q - 1.0);

		//% compute the norm of ATy corresponding to each group
		//norm_ATy=zeros(k,1);
		//for i=1:k
		//	norm_ATy(i,1)=norm( ATy( (1+ind(i)):ind(i+1) ), q_bar );
		//end
		for(i = 0; i <k ; i++)
			norm_ATy[i] = dnrm( ind[i+1] - ind[i], &ATy[ind[i]],1,q_bar);

		// Incorporate gWeight  (element-wise division via a 0-band solve)
		//norm_ATy=norm_ATy./gWeight;
		dtbsv('U','N','N',k,0,gWeights,1,norm_ATy,1);

		//    lambda_max=max(norm_ATy);
		lambda_max = idamax(k,norm_ATy,1);
		/* NOTE(review): standard BLAS idamax returns the *index* of the
		   largest-|.| element, not its value; if this wrapper follows that
		   convention the line above should read
		   norm_ATy[idamax(k,norm_ATy,1)-1] (1-based) — verify locally. */

		//lambda=z*lambda_max;
		lambda=z*lambda_max;
	}

	/* Initialize a starting point */
	if ( opt.init == 2)
		memset(x,0,sizeof(double)*n);
	else{
		if ( hasGot(&opt,"x0"))
			dcopy(n,opt.x0,1,x,1);	/* copy: x is updated in place below */
		else
			/* copy ATy instead of aliasing it: x is overwritten by the
			   projection every iteration, which would corrupt the ATy used
			   in the gradient g = ATAs - ATy */
			dcopy(n,ATy,1,x,1);
	}

	// Compute Ax
	normalizedmv('N',&opt,A,x,Ax);

	// TO-DO add code for condition opt.init==0 where we call initfactor

	/******** The main program starts here Armijo Line Search Algorithm ********/

	/* bFlag is raised when the gradient step makes little improvement;
	   it must start at 0, otherwise the early exit below fires at once */
	bFlag = 0;

	if( opt.mFlag == 0 && opt.lFlag == 0 ){

		// We assume the maximum eigen value of A'A is over 1
		L = 1.0;

		//xp=x; Axp=Ax; xxp=zeros(n,1);
		dcopy(n,x,1,xp,1);
		dcopy(m,Ax,1,Axp,1);
		memset(xxp,0,sizeof(double)*n);

		alphap=0; alpha=1;

		for(iterStep = 0 ; iterStep < opt.maxIter ; iterStep ++ ){
			beta=(alphap-1)/alpha;

			//s=x + beta* xxp;
			dcopy(n,x,1,s,1);
			daxpy(n,beta,xxp,1,s,1);

			// Compute the gradient g
			//As=Ax + beta* (Ax-Axp);
			dcopy(m,Ax,1,As,1);
			dscal(m,1.0 + beta,As,1);
			daxpy(m,-beta,Axp,1,As,1);

			// Compute ATAs
			normalizedmv('T',&opt,A,As,ATAs);

			// Obtain the gradient
			//g=ATAs-ATy;
			dcopy(n,ATAs,1,g,1);
			daxpy(n,-1.0,ATy,1,g,1);

			// xp=x;    Axp=Ax;
			dcopy(n,x,1,xp,1);
			dcopy(m,Ax,1,Axp,1);

			/* line search on L */
			for(;;){
				//v=s-g/L;
				dcopy(n,s,1,v,1);
				daxpy(n,-1.0/L , g,1,v,1);

/*
				% Lq/L1-norm regularized projection
				if (q<1e6)
					x=eppVector(v, ind, k, n, lambda/ L * gWeight, q);
				else % when q>=1e6, we treat q as inf
					x=eppVector(v, ind, k, n, lambda/ L * gWeight, 1e6);
				end

*/
				/* tmp1 = (lambda/L) * gWeights; tmp2 = group boundaries */
				dcopy(k,gWeights,1,tmp1,1);
				dscal(k,lambda/L,tmp1,1);
				for(i=0;i<=k;i++)
					tmp2[i] = ind[i];
				if(q < 1.0e6)
					eppVector(x,v,tmp2,k,n,tmp1,q);
				else
					eppVector(x,v,tmp2,k,n,tmp1, 1.0e6);

				// v=x-s;
				dcopy(n,x,1,v,1);
				daxpy(n,-1.0,s,1,v,1);

				// Compute Ax
				normalizedmv('N',&opt,A,x,Ax);

				// Av=Ax -As;
				dcopy(m,Ax,1,Av,1);
				daxpy(m,-1.0,As,1,Av,1);

				//r_sum=v'*v; l_sum=Av'*Av;

				r_sum = ddot(n,v,1,v,1);
				l_sum = ddot(m,Av,1,Av,1);

				if (r_sum <=1e-20){
					bFlag=1; //% this shows that, the gradient step makes little improvement
					break;
				}

				if(l_sum <= r_sum * L)
					break;
				else
					L=max(2*L, l_sum/r_sum);
			}

			/*--------------- Step 3 --------------------*/

			// update alpha and check whether they converge
			alphap=alpha; alpha= (1.0l+ sqrt(4.0l*alpha*alpha +1.0l))/2.0l;

			//        xxp=x-xp;   Axy=Ax-y;
			dcopy(n,x,1,xxp,1);
			daxpy(n,-1.0,xp,1,xxp,1);
			dcopy(m,Ax,1,Axy,1);
			daxpy(m,-1.0,y,1,Axy,1);

			// ValueL(iterStep)=L;
			ValueL[iterStep] = L;

	/*
			% the q-norm of x
			x_norm=zeros(k,1);
			for i=1:k
				x_norm(i,1)=norm(  x( (1+ind(i)):ind(i+1) ), q);
			end

	*/
			for(i=0; i < k ; i ++ )
				x_norm[i] = dnrm(ind[i+1] - ind[i],&x[ind[i]],1,q);


			//funVal(iterStep)=Axy'* Axy/2 + lambda * x_norm'* gWeight;
			fVal = fValp = funVal[iterStep] = lambda *ddot(k,x_norm,1,gWeights,1) + ddot(m,Axy,1,Axy,1)/2.0 ;
			if(iterStep != 0)
				fValp = funVal[iterStep -1];

			if(bFlag)
				break;	/* gradient step made little improvement; stop */

			if(terminationCondition(&opt,fVal,fValp,x,dnrm2(n,xxp,1),dnrm2(n,xp,1),iterStep))
				break;
		}

	}
	ret.errorCode = 0;
	ret.x = x;
	ret.funVal = funVal;
	strcpy(ret.type,"glLeastRm1");
	ret.totIter = iterStep;
	ret.ValueL = ValueL;
	return ret;
}


/* Nemirovski's line search + reformulated problem */
/*
 * glLeastRm2 - group-lasso (q==2) regularized least squares solved on the
 * reformulated problem with auxiliary per-group variables t[i] bounding
 * ||x_Gi||_2, using Nemirovski's line search (the opt.mFlag==1 &&
 * opt.lFlag==0 && opt.q==2 variant selected by glLeastR).
 * Inputs and the returned FUNVAL are as documented for glLeastRm1.
 *
 * NOTE(review): work buffers allocated below are never freed before
 * returning (kept as in the original code); only ret.x, ret.funVal and
 * ret.ValueL escape to the caller.
 */
FUNVAL glLeastRm2(double* A, double* y, double z, OPTS opt){
	FUNVAL ret;
	int m,n, iterStep, k,i, bFlag;
	int * ind;
	double q,q_bar;
	double* gWeights;
	double *ATy, *x, *Ax, *Axy, *xxp, *xp, *ATAs, *Axp, *As, *g, *s, *norm_ATy, *v, *Av, *tmp2, *t, *s_t, *tp, *u;
	double *funVal,*ValueL;
	double lambda, alpha, alphap, L, l_sum, r_sum,fVal,fValp, lambda_max, beta;

	m = opt.m;
	n = opt.n;
	iterStep = 0;	/* keep ret.totIter defined even if the main loop never runs */
	initNormalization(&opt,A);

	/* Initialize 'ind' */
	if(!hasGot(&opt,"ind"))
		errorOut("Please set the OPT.ind field before executing again.");
	else{
		ind = opt.ind;
		k = opt.ind_size - 1;
		if(ind[k] != (n))
			errorOut("Check opt.ind, the last value should be n");
	}
	gWeights = (double * ) malloc( sizeof(double) * k);
	ATy = (double * ) malloc( sizeof(double) * n);
	norm_ATy = (double * ) malloc( sizeof(double) * k);
	x = (double * ) malloc( sizeof(double) * n);
	Ax = (double * ) malloc( sizeof(double) * m);
	xp = (double * ) malloc( sizeof(double) * n);
	Axp = (double * ) malloc( sizeof(double) * m);
	xxp = (double * ) malloc( sizeof(double) * n);
	t = (double * ) malloc( sizeof(double) * k);
	tp = (double * ) malloc( sizeof(double) * k);
	s = (double * ) malloc( sizeof(double) * n);
	s_t = (double * ) malloc( sizeof(double) * k);
	As = (double * ) malloc( sizeof(double) * m);
	ATAs = (double * ) malloc( sizeof(double) * n);
	g = (double * ) malloc( sizeof(double) * n);
	u = (double * ) malloc( sizeof(double) * n);
	v = (double * ) malloc( sizeof(double) * n);
	/* tmp2 holds the k+1 group boundaries as doubles; the former size
	   maxof2(m,n) is one element short when every group has size one
	   (k==n and m<=n), which overflowed the heap block */
	tmp2 = (double * ) malloc( sizeof(double) * (k+1));
	Av = (double * ) malloc( sizeof(double) * m);
	ValueL = (double * ) malloc( sizeof(double) * opt.maxIter);
	Axy = (double * ) malloc( sizeof(double) * m);
	funVal = (double * ) malloc( sizeof(double) * opt.maxIter);



	/* Initialize 'q' */
	if( !hasGot(&opt,"q"))
		opt.q = q = 2;
	else
		if((q = opt.q)<1)
			errorOut("q should be larger than 1");

	/* Initialize gWeights - The weights for each group */
	if(hasGot(&opt,"gWeights")){
		/* copy (not alias) so the caller's array stays untouched and the
		   malloc above is not leaked */
		dcopy(k,opt.gWeights,1,gWeights,1);
		// TO-DO have to assert all gWeights are positive else errorOut
	}
	else
		dcopy(k,oneVector,1,gWeights,1);

	/********* Starting point initialization *******/
	/* Compute ATy */
	normalizedmv('T',&opt,A,y,ATy);

	/* Process the regularization parameter */
	if( opt.rFlag == 0)
		lambda = z;
	else{
		if( z<0.0  || z>1.0)
			errorOut("z should be in range (0,1)");
		/* q_bar is the dual norm exponent of q */
		if( q == 1)
			q_bar = Inf;
		else if( q > 1.0e6)
			q_bar = 1.0;
		else
			q_bar = q / (q - 1.0);

		//% compute the norm of ATy corresponding to each group
		//norm_ATy=zeros(k,1);
		//for i=1:k
		//	norm_ATy(i,1)=norm( ATy( (1+ind(i)):ind(i+1) ), q_bar );
		//end
		for(i = 0; i <k ; i++)
			norm_ATy[i] = dnrm( ind[i+1] - ind[i], &ATy[ind[i]],1,q_bar);

		// Incorporate gWeight  (element-wise division via a 0-band solve)
		//norm_ATy=norm_ATy./gWeight;
		dtbsv('U','N','N',k,0,gWeights,1,norm_ATy,1);

		//    lambda_max=max(norm_ATy);
		lambda_max = idamax(k,norm_ATy,1);
		/* NOTE(review): standard BLAS idamax returns the *index* of the
		   largest-|.| element, not its value; if this wrapper follows that
		   convention the line above should read
		   norm_ATy[idamax(k,norm_ATy,1)-1] (1-based) — verify locally. */

		//lambda=z*lambda_max;
		lambda=z*lambda_max;
	}

	/* Initialize a starting point */
	if ( opt.init == 2)
		memset(x,0,sizeof(double)*n);
	else{
		if ( hasGot(&opt,"x0"))
			dcopy(n,opt.x0,1,x,1);	/* copy: x is updated in place below */
		else
			/* copy ATy instead of aliasing it: x is overwritten by the
			   projection every iteration, which would corrupt the ATy used
			   in the gradient g = ATAs - ATy */
			dcopy(n,ATy,1,x,1);
	}

	// Compute Ax
	normalizedmv('N',&opt,A,x,Ax);

	// TO-DO add code for condition opt.init==0 where we call initfactor

	/* bFlag is raised when the gradient step makes little improvement;
	   it must start at 0, otherwise the early exit below fires at once */
	bFlag = 0;

	/********************** Nemirovski's line search + reformulated problem **************************/
	if (opt.mFlag==1 && opt.lFlag==0 && opt.q==2){
		L=1.0;

		//xp=x; Axp=Ax; xxp=zeros(n,1);
		dcopy(n,x,1,xp,1);
		dcopy(m,Ax,1,Axp,1);
		memset(xxp,0,sizeof(double)*n);

		// t= zeros(k,1);
		//for i=1:k
		//    t(i,1)=norm(  x( (1+ind(i)):ind(i+1) ), 2);
		//end
		//tp=t;

		for(i = 0; i<k ; i ++)
			t[i] = dnrm(ind[i+1] - ind[i] ,&x[ind[i]],1,2.0);
		dcopy(k,t,1,tp,1);

		alphap=0; alpha=1;

		for( iterStep = 0 ; iterStep < opt.maxIter ; iterStep ++ ){
			/* ----------------- Step 1 ---------------------- */
			beta=(alphap-1)/alpha;
			//s=x + beta* xxp; s_t=t + beta * (t-tp);
			dcopy(n,x,1,s,1);
			daxpy(n,beta,xxp,1,s,1);

			dcopy(k,t,1,s_t,1);
			dscal(k,1.0+beta , s_t,1);
			daxpy(k,-beta , tp,1,s_t,1);

			/* ---------------- Step 2 -------------------- */
			// As=Ax + beta* (Ax-Axp);
			dcopy(m,Ax,1,As,1);
			dscal(m,1.0 + beta , As,1);
			daxpy(m, - beta, Axp,1,As,1);

			// Compute ATAs
			normalizedmv('T',&opt,A,As,ATAs);

			// Obtain the gradient g
			//g=ATAs-ATy;
			dcopy(n,ATAs,1,g,1);
			daxpy(n,-1.0,ATy,1,g,1);

			//xp=x;    Axp=Ax;
			// tp=t;

			dcopy(n,x,1,xp,1);
			dcopy(m,Ax,1,Axp,1);
			dcopy(k,t,1,tp,1);

			/* line search on L */
			for(;;){
				// u=s-g/L; v= s_t - lambda * gWeight / L;
				dcopy(n,s,1,u,1);
				daxpy(n,-1.0/L , g,1,u,1);
				dcopy(k,s_t,1,v,1);
				daxpy(k,-lambda/L , gWeights,1,v,1);

				//Projection matlab code:[x, t] = eppVectorR(u, v, ind, n, k);
				for(i=0;i<=k; i++)
					tmp2[i] = ind[i];
				eppVectorR(x,t,u,v,tmp2,n,k);

				// v = x - s;
				dcopy(n,x,1,v,1);
				daxpy(n,-1.0,s,1,v,1);

				// Compute Ax
				normalizedmv('N',&opt,A,x,Ax);

				// Av=Ax -As;
				dcopy(m,Ax,1,Av,1);
				daxpy(m, - 1.0 , As,1,Av,1);

				// r_sum=v'*v + norm(t-s_t)^2; l_sum=Av'*Av;
				/* ||t - s_t||^2 expanded as t.t - 2 t.s_t + s_t.s_t */
				r_sum = ddot(n,v,1,v,1) - 2.0*ddot(k,t,1,s_t,1) + ddot(k,t,1,t,1) + ddot(k,s_t,1,s_t,1);
				l_sum = ddot(m,Av,1,Av,1);

				if (r_sum <=1e-20){
					bFlag=1; //% this shows that, the gradient step makes little improvement
					break;
				}

				if(l_sum <= r_sum * L)
					break;
				else
					L=max(2*L, l_sum/r_sum);
			}

			/* ----------------- Step 3 -------------- */

			alphap=alpha; alpha= (1.0l+ sqrt(4.0l*alpha*alpha +1.0l))/2.0l;

			ValueL[iterStep]=L;
			//xxp=x-xp;   Axy=Ax-y;
			dcopy(n,x,1,xxp,1);
			daxpy(n,-1.0,xp,1,xxp,1);
			dcopy(m,Ax,1,Axy,1);
			daxpy(m,-1.0,y,1,Axy,1);

			//funVal(iterStep)=Axy'* Axy/2 + lambda * t'* gWeight;
			fVal = fValp = funVal[iterStep] = lambda* ddot(k,t,1,gWeights,1) + ddot(m,Axy,1,Axy,1)/2.0;
			if(iterStep != 0)
				fValp = funVal[iterStep - 1];

			if(bFlag)
				break;	/* gradient step made little improvement; stop */

			if(terminationCondition(&opt,fVal,fValp,x,dnrm2(n,xxp,1),dnrm2(n,xp,1),iterStep))
				break;
		}
	}
	ret.errorCode = 0;
	ret.x = x;
	ret.funVal = funVal;
	strcpy(ret.type,"glLeastRm2");
	ret.totIter = iterStep;
	ret.ValueL = ValueL;
	return ret;
}







/* Adaptive line search  */
/*
 * glLeastRm3 - group-lasso (q==2) regularized least squares on the
 * reformulated problem (auxiliary per-group variables t bounding
 * ||x_Gi||_2), using an adaptive line search that also shrinks L when the
 * accepted step was too conservative (tao >= 5).  Selected by glLeastR
 * when opt.mFlag==1 && opt.lFlag==1 && opt.q==2.  Inputs and the returned
 * FUNVAL are as documented for glLeastRm1.
 *
 * NOTE(review): work buffers allocated below are never freed before
 * returning (kept as in the original code); only ret.x, ret.funVal and
 * ret.ValueL escape to the caller.
 */
FUNVAL glLeastRm3(double* A, double* y, double z, OPTS opt){
	FUNVAL ret;
	int m,n, iterStep, k,i, bFlag;
	int * ind;
	double q,q_bar, gamma, norm_xp,norm_xxp,tao;
	double* gWeights;
	double *ATy, *x, *Ax, *Axy, *xxp, *xp, *ATAs, *Axp, *As, *g, *s, *norm_ATy, *v, *Av, *tmp2, *t, *s_t, *tp, *u, *ATAx, *Axnew, *xnew, *tnew, *v_t, *ATAxp;
	double *funVal,*ValueL;
	double lambda, alpha, alphap, L, l_sum, r_sum,fVal,fValp, lambda_max, beta;

	m = opt.m;
	n = opt.n;
	iterStep = 0;	/* keep ret.totIter defined even if the main loop never runs */
	initNormalization(&opt,A);

	/* Initialize 'ind' */
	if(!hasGot(&opt,"ind"))
		errorOut("Please set the OPT.ind field before executing again.");
	else{
		ind = opt.ind;
		k = opt.ind_size - 1;
		if(ind[k] != (n))
			errorOut("Check opt.ind, the last value should be n");
	}
	tmp2 = (double *) malloc( sizeof(double) * (k+1));	/* k+1 group boundaries as doubles */
	gWeights = (double *) malloc( sizeof(double) * k);
	ATy = (double *) malloc( sizeof(double) * n);
	norm_ATy = (double *) malloc( sizeof(double) * k);
	x = (double *) malloc( sizeof(double) * n);
	Ax = (double *) malloc( sizeof(double) * m);
	xp = (double *) malloc( sizeof(double) * n);
	Axp = (double *) malloc( sizeof(double) * m);
	xxp = (double *) malloc( sizeof(double) * n);
	t = (double *) malloc( sizeof(double) * k);
	tp = (double *) malloc( sizeof(double) * k);
	ATAx = (double *) malloc( sizeof(double) * n);
	ATAxp = (double *) malloc( sizeof(double) * n);
	s = (double *) malloc( sizeof(double) * n);
	s_t = (double *) malloc( sizeof(double) * k);
	As = (double *) malloc( sizeof(double) * m);
	ATAs = (double *) malloc( sizeof(double) * n);
	g = (double *) malloc( sizeof(double) * n);
	u = (double *) malloc( sizeof(double) * n);
	v = (double *) malloc( sizeof(double) * n);
	tnew = (double *) malloc( sizeof(double) * k);
	xnew = (double *) malloc( sizeof(double) * n);
	v_t = (double *) malloc( sizeof(double) * k);
	Axnew = (double *) malloc( sizeof(double) * m);
	Av = (double *) malloc( sizeof(double) * m);
	ValueL = (double *) malloc( sizeof(double) * opt.maxIter);
	Axy = (double *) malloc( sizeof(double) * m);
	funVal = (double *) malloc( sizeof(double) * opt.maxIter);

	/* Initialize 'q' */
	if( !hasGot(&opt,"q"))
		opt.q = q = 2;
	else
		if((q = opt.q)<1)
			errorOut("q should be larger than 1");

	/* Initialize gWeights - The weights for each group */
	if(hasGot(&opt,"gWeights")){
		/* copy (not alias) so the caller's array stays untouched and the
		   malloc above is not leaked */
		dcopy(k,opt.gWeights,1,gWeights,1);
		// TO-DO have to assert all gWeights are positive else errorOut
	}
	else
		dcopy(k,oneVector,1,gWeights,1);

	/********* Starting point initialization *******/
	/* Compute ATy */
	normalizedmv('T',&opt,A,y,ATy);

	/* Process the regularization parameter */
	if( opt.rFlag == 0)
		lambda = z;
	else{
		if( z<0.0  || z>1.0)
			errorOut("z should be in range (0,1)");
		/* q_bar is the dual norm exponent of q */
		if( q == 1)
			q_bar = Inf;
		else if( q > 1.0e6)
			q_bar = 1.0;
		else
			q_bar = q / (q - 1.0);

		//% compute the norm of ATy corresponding to each group
		//norm_ATy=zeros(k,1);
		//for i=1:k
		//	norm_ATy(i,1)=norm( ATy( (1+ind(i)):ind(i+1) ), q_bar );
		//end
		for(i = 0; i <k ; i++)
			norm_ATy[i] = dnrm( ind[i+1] - ind[i], &ATy[ind[i]],1,q_bar);

		// Incorporate gWeight  (element-wise division via a 0-band solve)
		//norm_ATy=norm_ATy./gWeight;
		dtbsv('U','N','N',k,0,gWeights,1,norm_ATy,1);

		//    lambda_max=max(norm_ATy);
		lambda_max = idamax(k,norm_ATy,1);
		/* NOTE(review): standard BLAS idamax returns the *index* of the
		   largest-|.| element, not its value; if this wrapper follows that
		   convention the line above should read
		   norm_ATy[idamax(k,norm_ATy,1)-1] (1-based) — verify locally. */

		//lambda=z*lambda_max;
		lambda=z*lambda_max;
	}

	/* Initialize a starting point */
	if ( opt.init == 2)
		memset(x,0,sizeof(double)*n);
	else{
		if ( hasGot(&opt,"x0"))
			dcopy(n,opt.x0,1,x,1);	/* copy: x is updated in place below */
		else
			/* copy ATy instead of aliasing it: x is overwritten every
			   iteration, which would corrupt the ATy used in the gradient */
			dcopy(n,ATy,1,x,1);
	}

	// Compute Ax
	normalizedmv('N',&opt,A,x,Ax);

	// TO-DO add code for condition opt.init==0 where we call initfactor

	/* bFlag is raised when the gradient step makes little improvement;
	   it must start at 0, otherwise the early exit below fires at once */
	bFlag = 0;

	/********************** Adaptive line search **************************/
	if (opt.mFlag==1 && opt.lFlag==1 && opt.q==2){
		L=1;
		//% We assume that the maximum eigenvalue of A'A is over 1

		gamma=1;
		//% we shall set the value of gamma = L,
		//% where L is appropriate for the starting point

		// xp=x; Axp=Ax;
		dcopy(n,x,1,xp,1);
		dcopy(m,Ax,1,Axp,1);

		//zero xxp
		memset(xxp,0,sizeof(double)*n);

/*    for i=1:k
        t(i,1)=norm(  x( (1+ind(i)):ind(i+1) ), 2);
    end
    tp=t;*/

		for(i=0 ; i<k ; i++)
			t[i] = dnrm(ind[i+1] - ind[i] , &x[ind[i]],1,2.0);
		dcopy(k,t,1,tp,1);

		// compute ATAx
		normalizedmv('T',&opt,A,Ax,ATAx);

		for(iterStep = 0 ; iterStep < opt.maxIter ; iterStep ++ ){
			//ATAxp = ATAx;
			dcopy(n,ATAx,1,ATAxp,1);

			normalizedmv('T',&opt,A,Ax,ATAx);

			/* -=-=-=-=-=-=-=-=-=-=-= Line search for L begins */
			for(;;){
				if(iterStep != 0){
					alpha= (-gamma+ sqrt(gamma*gamma + 4.0l* L * gamma)) / (2.0l*L);
					beta= (gamma - gamma* alphap) / (alphap * gamma + alphap* L * alpha);
					//% beta is the coefficient for generating search point s

					// s=x + beta* xxp;   s_t= t + beta * (t -tp);
					dcopy(n,x,1,s,1);
					daxpy(n,beta,xxp,1,s,1);
					dcopy(k,t,1,s_t,1);
					dscal(k,1.0 + beta, s_t,1);
					daxpy(k,-beta,tp,1,s_t,1);

					//As=Ax + beta* (Ax-Axp);
					dcopy(m,Ax,1,As,1);
					dscal(m,1.0 + beta , As,1);
					daxpy(m,-beta,Axp,1,As,1);

					//ATAs=ATAx + beta * (ATAx- ATAxp);
					dcopy(n,ATAx,1,ATAs,1);
					dscal(n,1.0 +beta, ATAs,1);
					daxpy(n,-beta, ATAxp,1,ATAs,1);
				}
				else{
					alpha= (-1.0l+ sqrt(5.0l)) / 2.0l;
					beta=0.0;
					//s=x; s_t=t; As=Ax; ATAs=ATAx;
					dcopy(n,x,1,s,1);
					dcopy(k,t,1,s_t,1);
					dcopy(m,Ax,1,As,1);
					dcopy(n,ATAx,1,ATAs,1);
				}
				//             g=ATAs-ATy;
				dcopy(n,ATAs,1,g,1);
				daxpy(n,-1.0,ATy,1,g,1);

				// let s walk in the direction of the antigradient of s
				//u=s-g/L; v= s_t - lambda * gWeight / L;
				dcopy(n,s,1,u,1);
				daxpy(n,-1.0/L,g,1,u,1);
				dcopy(k,s_t,1,v,1);
				daxpy(k,-lambda/L,gWeights,1,v,1);

				// Projection [xnew, tnew] = eppVectorR(u, v, ind, n, k);
				for(i=0;i<=k; i++)
					tmp2[i] = ind[i];
				eppVectorR(xnew,tnew,u,v,tmp2,n,k);

				//v=xnew-s;
				dcopy(n,xnew,1,v,1);
				daxpy(n,-1.0,s,1,v,1);

				//v_t=tnew-s_t;
				dcopy(k,tnew,1,v_t,1);
				daxpy(k,-1.0,s_t,1,v_t,1);

				// compute Axnew
				normalizedmv('N',&opt,A,xnew,Axnew);

				//Av=Axnew -As;
				dcopy(m,Axnew,1,Av,1);
				daxpy(m,-1.0,As,1,Av,1);

				//r_sum=v'*v + v_t'*v_t; l_sum=Av'*Av;
				r_sum = ddot(n,v,1,v,1) + ddot(k,v_t,1,v_t,1);
				l_sum = ddot(m,Av,1,Av,1);

				if (r_sum <=1e-20){
					bFlag=1; //% this shows that, the gradient step makes little improvement
					break;
				}

				if(l_sum <= r_sum * L)
					break;
				else
					L=max(2.0 *L, l_sum/r_sum);

			}
			gamma=L* alpha* alpha;    alphap=alpha;

			//ValueL(iterStep)=L;
			ValueL[iterStep] = L;

			/* shrink L when the accepted step was very conservative */
			tao = L * r_sum / l_sum;
			if (tao >= 5.0)
				L=L*0.8;

			//xp=x;    x=xnew; xxp=x-xp;
			dcopy(n,x,1,xp,1);
			dcopy(n,xnew,1,x,1);
			dcopy(n,x,1,xxp,1);
			daxpy(n,-1.0,xp,1,xxp,1);

			//Axp=Ax;  Ax=Axnew;
			dcopy(m,Ax,1,Axp,1);
			dcopy(m,Axnew,1,Ax,1);

			//tp=t; t=tnew;
			dcopy(k,t,1,tp,1);
			dcopy(k,tnew,1,t,1);

			//Axy=Ax-y;
			dcopy(m,Ax,1,Axy,1);
			daxpy(m, -1.0,y,1,Axy,1);

			//funVal(iterStep)=Axy'* Axy/2 + lambda * t'* gWeight;
			fVal = fValp = funVal[iterStep] = lambda*ddot(k,t,1,gWeights,1) + ddot(m,Axy,1,Axy,1)/2.0l;

			if(iterStep != 0)
				fValp = funVal[iterStep-1] ;

			if(bFlag)
				break;	/* gradient step made little improvement; stop */

			//norm_xp=sqrt(xp'*xp + tp'*tp);    norm_xxp=sqrt(xxp'*xxp+ norm(t-tp)^2);
			norm_xp = sqrt(ddot(n,xp,1,xp,1) + ddot(k,tp,1,tp,1));
			/* ||t - tp||^2 expanded as t.t - 2 t.tp + tp.tp */
			norm_xxp = sqrt(  ddot(n,xxp,1,xxp,1) - 2.0* ddot(k,tp,1,t,1) + ddot(k,t,1,t,1) + ddot(k,tp,1,tp,1) );

			if(terminationCondition(&opt,fVal,fValp,x,norm_xxp,norm_xp,iterStep))
				break;
		}
	}
	ret.errorCode = 0;
	ret.x = x;
	ret.funVal = funVal;
	strcpy(ret.type,"glLeastRm3");
	ret.totIter = iterStep;
	ret.ValueL = ValueL;
	return ret;
}


