#include "slep.h"
#include "hr_time.h"

#include "epph.h"


// Matlab function [x, funVal, ValueL]=LeastR(A, y, z, opts)
//
// Solves the l1-regularized (and optionally squared-l2-regularized) least
// squares problem
//     min_x  1/2 ||A x - y||_2^2 + opt.rsL2/2 ||x||_2^2 + lambda ||x||_1
// with one of three accelerated first-order schemes, selected by
// opt.mFlag / opt.lFlag:
//   mFlag==0, lFlag==0 : Armijo-Goldstein line search + accelerated gradient
//   mFlag==1, lFlag==0 : reformulated problem + Nemirovski's scheme
//   mFlag==1, lFlag==1 : reformulated problem + adaptive line search
//
// A   : m-by-n design matrix, always accessed through normalizedmv() so the
//       normalization configured in opt is applied transparently
// y   : length-m response vector
// z   : regularization value; used directly as lambda when opt.rFlag==0,
//       otherwise interpreted as a fraction in (0,1] of max|A'y|
// opt : solver options (m, n, maxIter, rsL2, rFlag, mFlag, lFlag, ...)
//
// Returns a FUNVAL whose funVal/ValueL (length opt.maxIter) and x (length n)
// are heap-allocated and owned by the caller. On error, errorCode is nonzero
// and the three result pointers are NULL.
FUNVAL LeastR(double* A, double* y, double z, OPTS opt){

	//definition of variables;
	int m = opt.m, n=opt.n, bFlag=0;
	double *funVal,*ValueL ,fVal,fValp;
	double *xp, *Ax, *Axp, *xxp,*s, *As,*ATAs,*g,*v,*Axy,*ATy,*Av,*t,*tp,*ttp,*s_t,*u,*v_t,*xnew,*tnew, *ATAxp,*ATAx,*Axnew;
	double *x;
	double L,alphap,alpha,beta,lambda,l_sum,r_sum;
	double tmp1,tmp2,tmp3, gamma;

	int i=0,i1=0,maxI;
	FUNVAL funret;

	funret.errorCode= 0;

	// Memory allocation.
	// NOTE(review): malloc results are not checked; an allocation failure
	// leads to a NULL dereference below -- consider adding checks.
	funVal = (double *) malloc( sizeof(double)* opt.maxIter);
	ValueL = (double *) malloc( sizeof(double)* opt.maxIter);
	x = (double *) malloc( sizeof(double)* n);
	xp = (double *) malloc( sizeof(double)* n);
	xxp = (double *) malloc( sizeof(double)* n);
	Axp = (double *) malloc( sizeof(double)* m);
	Ax = (double *) malloc( sizeof(double)* m);
	Axy = (double *) malloc( sizeof(double)* m);
	ATy = (double *) malloc( sizeof(double)* n);
	ATAs = (double *) malloc( sizeof(double)* n);
	As = (double *) malloc( sizeof(double)* m);
	Av = (double *) malloc( sizeof(double)* m);
	s = (double *) malloc( sizeof(double)* n);
	g = (double *) malloc( sizeof(double)* n);
	v = (double *) malloc( sizeof(double)* n);



	/********* Normalization initialization ************/
	initNormalization(&opt,A);

	// ATy = A' * y (with normalization applied)
	normalizedmv('T',&opt,A,y,ATy);

	// Index of the entry of A'y with the largest magnitude, used for the
	// relative regularization lambda = z * max|A'y|.
	// NOTE(review): assumes the idamax wrapper returns a 0-based index usable
	// directly below -- verify, since Fortran IDAMAX is 1-based.
	maxI = idamax(n,ATy,1);

	if(opt.rFlag == 0)
		lambda = z;
	else if( z<=0.0 || z>1.0){
		// Invalid relative regularization value. The original code printed
		// a message and then proceeded with lambda uninitialized (undefined
		// behavior); fail fast instead and release everything.
		printf("Error in the value of z");
		free(funVal); free(ValueL); free(x); free(xp); free(xxp);
		free(Axp); free(Ax); free(Axy); free(ATy); free(ATAs);
		free(As); free(Av); free(s); free(g); free(v);
		funret.errorCode = 1;
		funret.funVal = NULL;
		funret.x = NULL;
		funret.ValueL = NULL;
		return funret;
	}
	else
		lambda = fabs(ATy[maxI])*z;

	//initialize a starting point: x = 0
	memset(x,0,sizeof(double)* n);

	// Ax = A * x (all zeros here, but keeps the invariant Ax == A*x)
	normalizedmv('N',&opt,A,x,Ax);

	/**********************************************************************************************/

/******* Armijo Goldstein line search + accelerated gradient descent***************/
		if(opt.mFlag==0 && opt.lFlag==0){
				// L is the current estimate of the Lipschitz constant of the
				// smooth part of the objective.
				L = 1.0 + opt.rsL2;

				//xp=x; Axp=Ax; xxp=zeros(n,1);
				dcopy(n,x,1,xp,1);
				dcopy(m,Ax,1,Axp,1);
				memset(xxp,0,sizeof(double)* n);

				// alphap and alpha are used for computing the weight in forming search point
				alphap = 0.0;
				alpha = 1.0;

				for(i=0;i< opt.maxIter;++i,bFlag=0){
					// --------------------------- step 1 ---------------------------
					// compute search point s based on xp and x (with beta)
					beta=(alphap-1.0)/alpha;
					//s=x + beta* xxp;
					dcopy(n,x,1,s,1);
					daxpy(n, beta, xxp, 1, s, 1);

					//% --------------------------- step 2 ---------------------------
					//% line search for L and compute the new approximate solution x

					//% compute the gradient (g) at s
					//As=Ax + beta* (Ax-Axp);
					//		copy Ax->As
					//		daxpy(x=Ax,y=As, alpha=beta)
					//		daxpy(x=Axp,y=As, alpha=-beta)
					dcopy(m,Ax,1,As,1);
					daxpy(m, beta, Ax, 1, As, 1);
					daxpy(m, (0.0-beta), Axp, 1, As, 1);

					//ATAs=A'*As;
					//dgemv('T',m,n,1.0,A,m,As,1,0.0,ATAs,1); // replace this method with the one below for taking care of normalization
					normalizedmv('T',&opt,A,As,ATAs);

					//      % obtain the gradient g
					//			g=ATAs-ATy + opt.rsL2 * s;
					dcopy(n,ATAs,1,g,1);
					daxpy(n, -1.0, ATy, 1, g, 1);
					daxpy(n, opt.rsL2, s, 1, g, 1);

					//		% copy x and Ax to xp and Axp
					//			xp=x;    Axp=Ax;
					dcopy(n,x,1,xp,1);
					dcopy(m,Ax,1,Axp,1);

					// Line search on L: double L until the quadratic upper
					// bound holds at the prox step.
					for(;;){
						//% let s walk in a step in the antigradient of s to get v
						//% and then do the l1-norm regularized projection
						//v=s-g/L;
						dcopy(n,s,1,v,1);
						daxpy(n, (-1.0l /L), g, 1, v, 1);

						//% L1-norm regularized projection (soft thresholding)
						//x=sign(v).*max(abs(v)-lambda / L,0);
						for(i1 = 0;i1<n;i1++){
							double sign = v[i1]<0.0?-1.0:1.0;
							x[i1] = sign * maxof2((fabs(v[i1])-lambda/L),0.0);
						}
						//v=x-s;  % the difference between the new approximate solution x
						//% and the search point s

						dcopy(n,x,1,v,1);
						daxpy(n, -1.0, s, 1, v, 1);

						//% compute A x
						//Ax=A* x;
						//dgemv('N',m,n,1.0,A,m,x,1,0.0,Ax,1); // replace this method with the one below for taking care of normalization
						normalizedmv('N',&opt,A,x,Ax);


						// Av=Ax -As;
						dcopy(m,Ax,1,Av,1);
						daxpy(m, -1.0, As, 1, Av, 1);
						//            r_sum=v'*v; l_sum=Av'*Av;
						r_sum = dnrm2(n,v,1);
						l_sum = dnrm2(m,Av,1);
						l_sum = l_sum*l_sum;
						r_sum = r_sum*r_sum;
						/* if (r_sum <=1e-20)
								bFlag=1; % this shows that, the gradient step makes little improvement
								break;
						end*/
						// NOTE(review): the Matlab original also sets bFlag=1
						// here to terminate the outer loop; this port only
						// exits the line search. Kept as-is to preserve
						// existing behavior.
						if( r_sum <= 1.0e-20)
							break;

						/*% the condition is ||Av||_2^2 <= (L - opt.rsL2) * ||v||_2^2
						if(l_sum <= r_sum * (L-opt.rsL2))
							break;
						else
							L=max(2*L, l_sum/r_sum + opt.rsL2);
							% fprintf('\n L=%5.6f',L);
						end*/

						if( l_sum <= r_sum*(L-opt.rsL2) )
							break;
						else
							L = maxof2( 2.0*L , l_sum/r_sum + opt.rsL2);
					}
					//ValueL(iterStep) =  L
					ValueL[i]=L;

					//% --------------------------- step 3 ---------------------------
					//% update alpha and alphap, and check whether converge
					//alphap=alpha; alpha= (1+ sqrt(4*alpha*alpha +1))/2;
					alphap = alpha;
					alpha = (1.0l + sqrt(4.0l*alpha*alpha + 1.0l))/2.0l;

					//xxp=x-xp;   Axy=Ax-y;
					dcopy(n,x,1,xxp,1);
					daxpy(n, -1.0, xp, 1, xxp, 1);
					dcopy(m,Ax,1,Axy,1);
					daxpy(m, -1.0, y, 1, Axy, 1);


					//funVal(iterStep)=Axy'* Axy/2 + opt.rsL2/2 * x'*x + sum(abs(x)) * lambda;
					tmp1 = dnrm2(m,Axy,1);
					tmp1 = tmp1*tmp1;
					tmp2 = dnrm2(n,x,1);
					tmp2 = tmp2*tmp2;

					funVal[i]=tmp1/2.0 + tmp2*opt.rsL2/2.0 +dasum(n,x,1)*lambda;
					fVal = funVal[i];
					if(i==0)
						fValp =  funVal[i];
					else
						fValp = funVal[i-1];
					bFlag = terminationCondition(&opt,fVal,fValp,x,dnrm2(opt.n,xxp,1),dnrm2(opt.n,xp,1),i);
					if(bFlag)
						break;
				}

				free(xp); free(xxp); free(Axp); free(Ax); free(Axy); free(ATy);
				free(ATAs); free(As); free(Av); free(s); free(g); free(v);

				funret.funVal = funVal;
				funret.x = x;
				funret.ValueL = ValueL;

				return funret;
			}
/******* End of Armijo Goldstein line search + accelerated gradient descent***************/

/******* Reformulated problem + Nemirovski's scheme***************/
		if(opt.mFlag==1 && opt.lFlag==0){
				L = 1.0 + opt.rsL2;

				//xp=x; Axp=Ax; xxp=zeros(n,1);
				dcopy(n,x,1,xp,1);
				dcopy(m,Ax,1,Axp,1);
				memset(xxp,0,sizeof(double)* n);

				// t is the auxiliary variable of the reformulation
				// (t >= |x| componentwise); tp is its previous iterate.
				t = (double*) malloc( sizeof(double) * n);
				tp = (double*) malloc( sizeof(double) * n);
				s_t = (double *) malloc( sizeof(double) * n);
				u = (double *) malloc( sizeof(double) * n);
				ttp = (double*) malloc( sizeof(double)* n);
				for(i1=0;i1< n; i1++)
					t[i1] = fabs(x[i1]);
				dcopy(n,t,1,tp,1);


				// alphap and alpha are used for computing the weight in forming search point
				alphap = 0.0;
				alpha = 1.0;
				v_t = (double *) malloc( sizeof(double) * n);

				for(i=0;i< opt.maxIter;++i,bFlag=0){
					// --------------------------- step 1 ---------------------------
					// compute search point s based on xp and x (with beta)
					//	beta=(alphap-1)/alpha;
					beta=(alphap-1.0)/alpha;
					//s=x + beta* xxp;
					dcopy(n,x,1,s,1);
					daxpy(n, beta, xxp, 1, s, 1);

					// s_t= t + beta * (t -tp);
					dcopy(n,t,1,s_t,1);
					dscal(n,(1.0 + beta),s_t,1);
					daxpy(n,(0.0 - beta),tp,1,s_t,1);


					//% --------------------------- step 2 ---------------------------
					//% line search for L and compute the new approximate solution x

					//% compute the gradient (g) at s
					//As=Ax + beta* (Ax-Axp);
					//		copy Ax->As
					//		daxpy(x=Ax,y=As, alpha=beta)
					//		daxpy(x=Axp,y=As, alpha=-beta)
					dcopy(m,Ax,1,As,1);
					daxpy(m, beta, Ax, 1, As, 1);
					daxpy(m, (0.0-beta), Axp, 1, As, 1);

					//ATAs=A'*As;
					//dgemv('T',m,n,1.0,A,m,As,1,0.0,ATAs,1); // replace this method with the one below for taking care of normalization
					normalizedmv('T',&opt,A,As,ATAs);

					//      % obtain the gradient g
					//			g=ATAs-ATy + opt.rsL2 * s;
					dcopy(n,ATAs,1,g,1);
					daxpy(n, -1.0, ATy, 1, g, 1);
					daxpy(n, opt.rsL2, s, 1, g, 1);

					//		% copy x and Ax to xp and Axp
					//			xp=x;    Axp=Ax;
					// tp=t;
					dcopy(n,x,1,xp,1);
					dcopy(m,Ax,1,Axp,1);
					dcopy(n,t,1,tp,1);

					// Line search on L for the reformulated problem.
					for(;;){
						//% let s walk in a step in the antigradient of s to get v
						//% and then do the l1-norm regularized projection
						// u =s-g/L;
						dcopy(n,s,1,u,1);
						daxpy(n, (-1.0l /L), g, 1, u, 1);

						// v= s_t - lambda / L;
						dcopy(n,s_t,1,v,1);
						daxpy(n,0.0-lambda/L,oneVector,1,v,1);

						// Euclidean projection of (u,v) onto the epigraph
						// constraint |x| <= t; writes the result into x and t.
						ep1R(x,t,u,v,n);

						// v = x-s;
						dcopy(n,x,1,v,1);
						daxpy(n,-1.0,s,1,v,1);

						//            v_t=t-s_t;

						dcopy(n,t,1,v_t,1);
						daxpy(n,-1.0,s_t,1,v_t,1);

						//% compute A x
						//Ax=A* x;
						//dgemv('N',m,n,1.0,A,m,x,1,0.0,Ax,1); // replace this method with the one below for taking care of normalization
						normalizedmv('N',&opt,A,x,Ax);

						// Av=Ax -As;
						dcopy(m,Ax,1,Av,1);
						daxpy(m, -1.0, As, 1, Av, 1);
						//            r_sum=v'*v; l_sum=Av'*Av;
						tmp1 = dnrm2(n,v,1);
						tmp2 = dnrm2(n,v_t,1);
						r_sum = tmp1*tmp1 + tmp2*tmp2;

						// l_sum already folds in the rsL2 term, so the test
						// below is against L rather than (L - rsL2).
						tmp2 = dnrm2(m,Av,1);
						l_sum = tmp2*tmp2 + tmp1*tmp1*opt.rsL2;
						/* if (r_sum <=1e-20)
								bFlag=1; % this shows that, the gradient step makes little improvement
								break;
						end*/
						// NOTE(review): Matlab original also sets bFlag=1 here;
						// kept as a plain break to preserve existing behavior.
						if( r_sum <= 1.0e-20)
							break;

						/*% the condition is ||Av||_2^2 <= (L - opt.rsL2) * ||v||_2^2
						if(l_sum <= r_sum * (L-opt.rsL2))
							break;
						else
							L=max(2*L, l_sum/r_sum + opt.rsL2);
							% fprintf('\n L=%5.6f',L);
						end*/

						if( l_sum <= r_sum*L )
							break;
						else
							L = maxof2( 2.0*L , l_sum/r_sum);
					}
					//ValueL(iterStep) =  L
					ValueL[i]=L;

					//% --------------------------- step 3 ---------------------------
					//% update alpha and alphap, and check whether converge
					//alphap=alpha; alpha= (1+ sqrt(4*alpha*alpha +1))/2;
					alphap = alpha;
					alpha = (1.0l + sqrt(4.0l*alpha*alpha + 1.0l))/2.0l;

					//xxp=x-xp;   Axy=Ax-y;
					dcopy(n,x,1,xxp,1);
					daxpy(n, -1.0, xp, 1, xxp, 1);
					dcopy(m,Ax,1,Axy,1);
					daxpy(m, -1.0, y, 1, Axy, 1);


					//funVal(iterStep)=Axy'* Axy/2 + opt.rsL2/2 * x'*x + sum(abs(x)) * lambda;
					tmp1 = dnrm2(m,Axy,1);
					tmp1 = tmp1*tmp1;
					tmp2 = dnrm2(n,x,1);
					tmp2 = tmp2*tmp2;

					funVal[i]=tmp1/2.0 + tmp2*opt.rsL2/2.0 +dasum(n,x,1)*lambda;
					fVal = funVal[i];
					if(i==0)
						fValp =  funVal[i];
					else
						fValp = funVal[i-1];
					//                norm_xp=sqrt(xp'*xp + tp'*tp);    norm_xxp=sqrt(xxp'*xxp+ norm(t-tp)^2);
					tmp1 = dnrm2(n,xxp,1);
					tmp3 = dnrm2(n,tp,1);
					tmp1 = sqrt(tmp1*tmp1 + tmp3*tmp3);
					tmp2 = dnrm2(n,xp,1);

					dcopy(n,t,1,ttp,1);
					daxpy(n,-1.0,tp,1,ttp,1);
					tmp3 = dnrm2(n,ttp,1);
					tmp2 = tmp2*tmp2 + tmp3*tmp3;

					bFlag = terminationCondition(&opt,fVal,fValp,x,tmp2,tmp1,i);
					if(bFlag)
						break;
				}

				//	double *xp, *Ax, *Axp, *xxp,*s, *As,*ATAs,*g,*v,*Axy,*ATy,*Av,*t,*tp,*ttp,*s_t,*u,*v_t;

				free(xp); free(xxp); free(Axp); free(Ax); free(Axy); free(ATy); free(ATAs); free(As);
				free(Av); free(s); free(g); free(v); free(t); free(tp); free(ttp); free(u); free(s_t); free(v_t);

				funret.funVal = funVal;
				funret.x = x;
				funret.ValueL = ValueL;

				return funret;
			}
/******* End of Reformulated problem + Nemirovski's scheme***************/



/******* adaptive line search***************/
		if(opt.mFlag==1 && opt.lFlag==1){
				L = 1.0 + opt.rsL2;
				gamma = 1.0;

				//xp=x; Axp=Ax; xxp=zeros(n,1);
				dcopy(n,x,1,xp,1);
				dcopy(m,Ax,1,Axp,1);
				memset(xxp,0,sizeof(double)* n);

				t = (double*) malloc( sizeof(double) * n);
				tp = (double*) malloc( sizeof(double) * n);
				s_t = (double *) malloc( sizeof(double) * n);
				v_t = (double *) malloc( sizeof(double) * n);
				u = (double *) malloc( sizeof(double) * n);
				ATAxp = (double *) malloc(sizeof(double)*n);
				ATAx = (double *) malloc(sizeof(double)*n);
				xnew = (double*) malloc(sizeof(double) * n);
				tnew = (double*) malloc(sizeof(double) * n);
				Axnew = (double*) malloc(sizeof(double) * m);


				for(i1=0;i1< n; i1++)
					t[i1] = fabs(x[i1]);
				dcopy(n,t,1,tp,1);


				// ATAx=A'*Ax;
				normalizedmv('T',&opt,A,Ax,ATAx);


				for(i=0;i< opt.maxIter;++i,bFlag=0){

					// ATAxp=ATAx;
					// % store ATAx to ATAxp
					dcopy(n,ATAx,1,ATAxp,1);


					// Compute ATAx
					normalizedmv('T',&opt,A,Ax,ATAx);


					// Line Search for L begins
					for(;;){

						if(i!=0){
								alpha= (-gamma+ sqrt(gamma*gamma + 4.0* L * gamma)) / (2.0*L);
								beta= (gamma - gamma* alphap) / (alphap * gamma + alphap* L * alpha);
								//% beta is the coefficient for generating search point s

								//s=x + beta* xxp;   s_t= t + beta * (t -tp);

								//s=x + beta* xxp;
								dcopy(n,x,1,s,1);
								daxpy(n, beta, xxp, 1, s, 1);

								// s_t= t + beta * (t -tp);
								dcopy(n,t,1,s_t,1);
								dscal(n,(1.0 + beta),s_t,1);
								daxpy(n,(0.0 - beta),tp,1,s_t,1);

								//As=Ax + beta* (Ax-Axp);
								dcopy(m,Ax,1,As,1);
								dscal(m,(1.0 + beta),As,1);
								daxpy(m,(0.0 - beta),Axp,1,As,1);


								//ATAs=ATAx + beta * (ATAx- ATAxp);
								dcopy(n,ATAx,1,ATAs,1);
								dscal(n,(1.0 + beta),ATAs,1);
								daxpy(n,(0.0 - beta),ATAxp,1,ATAs,1);

								//% compute the search point s, A * s, and A' * A * s
						}
						else{
							// First iteration: the search point is the
							// starting point itself.
							alpha = (-1.0 + sqrt(5.0))/2.0;
							beta = 0;
							// s=x; s_t=t; As=Ax; ATAs=ATAx;
							dcopy(n,x,1,s,1);
							dcopy(n,t,1,s_t,1);
							dcopy(m,Ax,1,As,1);
							dcopy(n,ATAx,1,ATAs,1);
						}
						//g=ATAs-ATy + rsL2 * s;
						dcopy(n,ATAs,1,g,1);
						daxpy(n,-1.0,ATy,1,g,1);
						daxpy(n,opt.rsL2,s,1,g,1);


						//% let s walk in a step in the antigradient of s
						//u=s-g/L;
						dcopy(n,s,1,u,1);
						daxpy(n,-1.0/L,g,1,u,1);

						//v= s_t - lambda / L;
						dcopy(n,s_t,1,v,1);
						daxpy(n,0.0-lambda/L,oneVector,1,v,1);

						//% projection
						//[xnew, tnew]=ep1R(u,v,n);
						//ep1R(double * x, double *t, double * u, double * v, int n);

						ep1R(xnew,tnew,u,v,n);

						//v=xnew-s;  % the difference between the new approximate solution x
						//				% and the search point s
						dcopy(n,xnew,1,v,1);
						daxpy(n,-1.0,s,1,v,1);

						//v_t=tnew-s_t;
						dcopy(n,tnew,1,v_t,1);
						daxpy(n,-1.0,s_t,1,v_t,1);

						normalizedmv('N',&opt,A,xnew,Axnew);
						//Av=Axnew -As;
						dcopy(m,Axnew,1,Av,1);
						daxpy(m,-1.0,As,1,Av,1);

						// r_sum=v'*v + v_t'*v_t; l_sum=Av'*Av + v'*v * rsL2;
						tmp1 = dnrm2(n,v,1);
						tmp2 = dnrm2(n,v_t,1);
						r_sum = tmp1*tmp1 + tmp2*tmp2;

						tmp2 = dnrm2(m,Av,1);
						l_sum = tmp2*tmp2 + tmp1*tmp1*opt.rsL2;
						if( r_sum <= 1.0e-20)
							break;
						if( l_sum <= r_sum*(L) )
							break;
						else
							L = maxof2( 2.0*L , l_sum/r_sum );
					}

					gamma=L* alpha* alpha;
					alphap=alpha;
					//% update gamma, and alphap


					//ValueL(iterStep) =  L
					ValueL[i]=L;

					//% --------------------------- step 3 ---------------------------
						//tao=L * r_sum / l_sum;
						//if (tao >=5)
						//	L=L*0.8;
						//end
						//% decrease the value of L when the line-search bound
						//% was far from tight
						tmp1 = L * r_sum/l_sum;
						if(tmp1 >=5.0)
							L = L * 0.8;

						//xp=x;    x=xnew; xxp=x-xp;
						//Axp=Ax;  Ax=Axnew;
						dcopy(n,x,1,xp,1);
						dcopy(n,xnew,1,x,1);
						dcopy(n,x,1,xxp,1);
						daxpy(n,-1.0,xp,1,xxp,1);
						dcopy(m,Ax,1,Axp,1);
						dcopy(m,Axnew,1,Ax,1);

						//% update x and Ax with xnew and Axnew
						//tp=t; t=tnew;
						//% update tp and t
						  //
						dcopy(n,t,1,tp,1);
						dcopy(n,tnew,1,t,1);

						//Axy=Ax-y;
						dcopy(m,Ax,1,Axy,1);
						daxpy(m,-1.0,y,1,Axy,1);

						//funVal(iterStep)=Axy' * Axy/2 + rsL2/2 * x'*x + lambda * sum(t);
						//% compute function value

					tmp1 = dnrm2(m,Axy,1);
					tmp2 = dnrm2(n,x,1);
					funVal[i]=tmp1*tmp1/2.0 + tmp2*tmp2*opt.rsL2/2.0 +ddot(n,t,1,oneVector,1)*lambda;
					fVal = funVal[i];
					if(i==0)
						fValp =  funVal[i];
					else
						fValp = funVal[i-1];
					bFlag = terminationCondition(&opt,fVal,fValp,x,dnrm2(opt.n,xxp,1),dnrm2(opt.n,xp,1),i);
					if(bFlag)
						break;
				}

				//	double *xp, *Ax, *Axp, *xxp,*s, *As,*ATAs,*g,*v,*Axy,*ATy,*Av,*t,*tp,*ttp,*s_t,*u,*v_t;

				free(xp); free(xxp); free(Axp); free(Ax); free(Axy); free(ATy); free(ATAs); free(As);
				free(Av); free(s); free(g); free(v); free(t); free(tp); free(u); free(s_t); free(v_t);
				free(tnew); free(xnew);
				// Fix: Axnew, ATAxp and ATAx were leaked in the original code.
				free(Axnew); free(ATAxp); free(ATAx);

				funret.funVal = funVal;
				funret.x = x;
				funret.ValueL = ValueL;

				return funret;
			}
/******* End of adaptive line search***************/

	/* No supported mFlag/lFlag combination matched (e.g. mFlag==0, lFlag==1).
	 * The original code fell through here leaking every buffer and returning
	 * a FUNVAL with indeterminate members; free everything and report an
	 * error instead. */
	free(xp); free(xxp); free(Axp); free(Ax); free(Axy); free(ATy);
	free(ATAs); free(As); free(Av); free(s); free(g); free(v);
	free(funVal); free(ValueL); free(x);
	funret.errorCode = 2;
	funret.funVal = NULL;
	funret.x = NULL;
	funret.ValueL = NULL;
	return funret;

}