#ifndef _SGMIN_H_
#define _SGMIN_H_

//#define NDEBUG  //Si esta definida, indica a BOOST que compile como release

//Standard includes.
#include <stdio.h>
#include <stdlib.h> //rand entre otros
#include <iostream> //cout
#include <vector> //operaciones con push_back (find)
#include <complex>
#include <limits> //dog

//Boost includes.
#include <boost/detail/algorithm.hpp> //sort
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/lu.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp> //Para subrange
#include <boost/numeric/bindings/traits/ublas_matrix.hpp>
#include <boost/numeric/bindings/traits/ublas_matrix.hpp>
#include <boost/numeric/bindings/traits/ublas_symmetric.hpp>
#include <boost/numeric/bindings/traits/ublas_hermitian.hpp>
#include <boost/timer.hpp>

//----------------------------------------------------------------------------------------------------
//QR decomposition.
extern "C" void dgeqrf_(long *M,
												long *N,
												double *A,
												long *LDA,
												double *TAU,
												double *WORK,
												long *LWORK,
												long *INFO);

//Cholesky decomposition.
extern "C" void dpotrf_(char *UPLO,
												long *N,
												double *A,
												long *LDA,
												long *INFO);

//Singular value decomposition.
extern "C" void dgesdd_(char *JOBZ,
												long *M,
												long *N,
												double *A,
												long *LDA,
												double *S,
												double *U,
												long *LDU,
												double *VT,
												long *LDVT,
												double *WORK,
												long *LWORK,
												long *IWORK,
												long *INFO);

//Reflectors for QR.
extern "C" void dormqr_(char *SIDE,
												char *TRANS,
												long *M,
												long *N,
												long *K,
												double *A,
												long *LDA,
												double *TAU,
												double *C,
												long *LDC,
												double *WORK,
												long *LWORK,
												long *INFO);
//----------------------------------------------------------------------------------------------------

using namespace boost::numeric::ublas;

/** Global configuration for the manifold minimizer (sg_min port). */
struct SGParameters{
	int metric;   // 0 = unconstrained, 1 = euclidean, 2 = canonical (see tangent(), ip(), move())
	int motion;   // 0 = approximate motion, otherwise exact geodesics (see move(), metric 1)
	int complex;  // nonzero for complex-valued problems (see sgmindimension())
	std::vector<vector<int> > partition; //array of arrays of different sizes (block partition of the variables)
	double gradtol; // stop when gradient magnitude falls to gradtol*(initial gradient magnitude)
	int dimension;
	bool verbose;
	double ftol;    // stop when the relative decrease of F in an iteration is below ftol
	int Mode;
	int maxiter;
};

/** FParameters 
 * Sample statistics consumed by the model objective functions in
 * namespace models.
 *          - FParameters.sigma = array of conditional covariance matrices
 *          - FParameters.sigmag = marginal covariance matrix
 *          - FParameters.n: sample size for each value of the response Y.
 *          - FParameters.Afit: covariance matrix of the fitted values of
 *          the regression of the centered predictors onto a regression
 *          basis fy.
 *          - FParameters.r: number of columns in regression basis fy.
 */
template<typename T>
struct FParameters{
	std::vector<matrix<T> > sigma;
	matrix<T> sigmag;
	vector<int> n;
	matrix<T> Afit;
	int r;
	matrix<T> B; // NOTE(review): appears SPFC-specific — purpose not documented anywhere visible; confirm
	matrix<T> means;
};

// File-scope minimizer settings shared by all routines below.
static SGParameters SGP;

/** Iterative minimizers over matrices Y with orthonormal columns
 *  (Y'*Y = I).  Declarations only; definitions live elsewhere. */
namespace solvers{
	namespace prcg{
		/** Result of sg_prcg: fn = final objective value, Yn = final point. */
		template<typename T>
			struct sgprcgr{
				T fn;
				matrix<T> Yn;
			};
		
		/** Conjugate-gradient search from initial point Y
		 *  (presumably Polak-Ribiere, from the namespace name — confirm). */
		template<typename T>
			sgprcgr<T> sg_prcg (matrix<T> &Y,const FParameters<T> &FP);
	};
	namespace invdgradcg{
		/** CG solve of the inverse differential-of-gradient system. */
		template<typename T>
			vector<T> invdgrad_cg(const vector<T> &Y, const vector<T> &W, double tol, double dl,int nargin);
	};
	namespace invdgradMINRES{
		/** MINRES solve of the inverse differential-of-gradient system. */
		template<typename T>
			matrix<T> invdgrad_MINRES (matrix<T> &Y, matrix<T> &W, double tol, double dl, int nargin);
	};
	namespace newton{
		/** Result of sg_newton: fn = final objective value, Yn = final point. */
		template<typename T>
			struct rnewton{
				T fn;
				matrix<T> Yn;
			};
		
		/** SG_NEWTON(Y)	
		 * Optimize the objective function, F(Y) over all
		 *	Y such that Y'*Y = I.  Employs a local iterative search with
		 *	initial point Y and terminates if the magnitude of the gradient 
		 *	falls to gradtol*(initial gradient magnitude) or if the relative 
		 *	decrease in F after some iteration is less than ftol.
		 *
		 *	[fn,Yn]= SG_NEWTON(Y)
		 *	Y is expected to satisfy Y'*Y = I.
		 *	Yn will satisfy Yn'*Yn = I.
		 * role	high level algorithm, Newton's Methods
		 */
		template<typename T>
			rnewton<T> sg_newton (matrix<T> &Y, const FParameters<T> &FP);
	};
	namespace frcg{
		/** Result of sg_frcg: fn = final objective value, Yn = final point. */
		template<typename T>
			struct rfrcg{
				T fn;
				matrix<T> Yn;
			};

		/** SG_FRCG(Y)	
		 * Optimize the objective function, F(Y) over all
		 *	Y such that Y'*Y = I.  Fletcher-Reeves CG iterative search with
		 *	initial point Y and terminates if the magnitude of the gradient 
		 *	falls to gradtol*(initial gradient magnitude) or if the relative 
		 *	decrease in F after some iteration is less than ftol.
		 *
		 *	[fn,Yn]= SG_FRCG(Y)
		 *	Y is expected to satisfy Y'*Y = I.
		 *	Yn will satisfy Yn'*Yn = I.
		 * role	high level algorithm, Fletcher-Reeves Method
		 */
		template<typename T>
			rfrcg<T> sg_frcg (matrix<T> &Y, const FParameters<T> &FP);
	};
	namespace dog{
		/** Result of sg_DOG: fn = final objective value, Yn = final point. */
		template<typename T>
			struct sgDogr{
				T fn;
				matrix<T> Yn;
			};
		
		/** Search from initial point Y (presumably a dog-leg strategy,
		 *  from the namespace name — confirm against the implementation). */
		template<typename T>
			sgDogr<T> sg_DOG (matrix<T>& Y,const FParameters<T> &FP);
	};
};

/** Model objective functions (negative log-likelihoods) and their
 *  gradients, optimized by the routines in namespace solvers. */
namespace models{
	namespace lad{
		/** Result of lad(): W = generating vectors, fn = loss at the optimum,
		 *  fp = loss for the original predictors. */
		template<typename T>
			struct rlad{
				matrix<T> W,fn;
				T fp;
			};
		/** [Wn,fn,fp] = lad(Y,X,u,morph,parameters)
		 *
		 * This function implements the Likelihood Acquired Directions (LAD) model
		 * for Dimension Reduction in Regression (Cook and Forzani 2009).
		 * USAGE:
		 * - outputs:
		 *     Wn: generating vectors for the central subspace;
		 *     fn: value of the loss function at the optimal point;
		 *     fp: value of the loss function for the original predictors;
		 *  - inputs:
		 *     Y: Response vector. 
		 *     X: Data matrix. Each row is an observation. It is assumed
		 *        that rows relate with the corresponding rows in Y, so that Y(k) is 
		 *	    the response due to X(k,:). 
		 *     u: Dimension of the sufficient subspace. It must be a 
		 *        natural greater than 1 and smaller than the number of columns in X.
		 *     morph: 'cont' for continuous responses or 'disc' for discrete
		 *     responses.
		 *     parameters (OPTIONAL): structure to set specific values of parameters for
		 *     computations. 
		 *           - parameters.nslices: number of slices for discretization of
		 *           continuous responses.
		 *           - parameters.sg: optional parameters for sg_min (see sg_min 
		 *           documentation for details)
		 *
		 *
		 * --------------------------- REFERENCES -----------------------------------
		 * Cook, R. D. and Forzani, L. (2009). Likelihood-based sufficient dimension reduction. 
		 * Journal of the American Statistical Association. 104 (485): 197-208.\ \
		 * doi:10.1198/jasa.2009.0106.
		 */
		template<typename T>
			rlad<T> lad (const vector<T> &Yaux, const matrix<T> &X, int u, int morph, matrix<T> &initval);
		/** Negative log-likelihood of the LAD model at W. */
		template<typename T>
			T F4lad (const matrix<T> &W, const FParameters<T> &FP);
		/** Derivative of F4lad with respect to W. */
		template<typename T>
			matrix<T> dF4lad (const matrix<T> &W, const FParameters<T> &FP);
	};
	namespace pfc{
		/** f = F4pfc(W,FParameters)
		 * This function computes the negative of the log-likelihood for the PFC
		 * model. 
		 *
		 * Inputs:
		 *    - vals: ordered eigenvalues of marginal covariance matrix.
		 *    - FParameters: structure of parameters computed from the sample. It
		 *    contains:
		 *          - FParameters.sigmag = marginal covariance matrix
		 *          - FParameters.n: sample size for each value of the response Y.
		 */
		template<typename T>
			T F4pfc (const vector<T> &vals, const FParameters<T> &FP);
	};
	namespace spfc{
		/** f = F4spfc(D,FParameters,u)
		 *
		 * This function computes the negative of the log-likelihood for the SPFC
		 * model. 
		 *
		 * Inputs:
		 *    - D: structured model for the covariance matrix.
		 *    - FParameters: structure of parameters computed from the sample. It
		 *    contains:
		 *          - FParameters.sigmag = marginal covariance matrix
		 *          - FParameters.n: sample size for each value of the response Y.
		 *          - FParameters.Afit: covariance matrix of the fitted values of
		 *          the regression of the centered predictors onto a regression
		 *          basis fy.
		 *          - FParameters.r: number of columns in regression basis fy.
		 *     - u: dimension of the reduced subspace.
		 */
		template<typename T>
			T F4spfc (const matrix<T> &D, const FParameters<T> &FP, int u);
	};
	namespace ipfc{
		/** f = F4ipfc(W,FParameters)
		 *
		 * This function computes the negative of the log-likelihood for the IPFC
		 * model. 
		 *
		 * Inputs:
		 *    - valsAfit: ordered eigenvalues of the covariance matrix from the
		 *    fitted values of the regression of the centered predictors onto a basis fy.
		 *    - FParameters: structure of parameters computed from the sample. It
		 *    contains:
		 *          - FParameters.sigmag = marginal covariance matrix
		 *          - FParameters.n: sample size for each value of the response Y.
		 */
		template<typename T>
			T F4ipfc (const vector<T> &valsAfit, const FParameters<T> &FP);
	};
	namespace hlda{
		/** W: projection matrix onto the reduced subspace
		 * FParameters: structure with needed statistics
		 */
		template<typename T>
			T F4hlda (const matrix<T> &W, const FParameters<T> &FP);
		/** Derivative of F4hlda with respect to W. */
		template<typename T>
			matrix<T> dF4hlda (const matrix<T> &W, const FParameters<T> &FP);
	};
	namespace epfc{
		/** f = F4epfc(W,FParameters)
		 *
		 * This function computes the negative of the log-likelihood for the EPFC
		 * model. 
		 *
		 * Inputs:
		 *    - W: orthogonal basis matrix for the dimension reduction subspace.
		 *    - FParameters: structure of parameters computed from the sample. It
		 *    contains:
		 *          - FParameters.sigmag = marginal covariance matrix
		 *          - FParameters.n: sample size for each value of the response Y.
		 *          - FParameters.Afit: covariance matrix of the fitted values of
		 *          the regression of the centered predictors onto a regression
		 *          basis fy.
		 */
		template<typename T>
			T F4epfc (const matrix<T> &W, const FParameters<T> &FP);
		/**	Derivative of F (minus the log-likelihood) for the EPFC model.
		 * Inputs:
		 *    - W: orthogonal basis matrix for the dimension reduction subspace.
		 *    - FParameters: structure of parameters computed from the sample. It
		 *    contains:
		 *          - FParameters.sigmag = marginal covariance matrix
		 *          - FParameters.n: sample size for each value of the response Y.
		 *          - FParameters.Afit: covariance matrix of the fitted values of
		 *          the regression of the centered predictors onto a regression
		 *          basis fy.
		 */
		template<typename T>
			matrix<T> dF4epfc (const matrix<T> &W, const FParameters<T> &FP);
	};
	namespace core{
		/** f = F4core(W,FParameters)
		 *
		 * This function computes the negative of the log-likelihood for the CORE
		 * model. 
		 *
		 * Inputs:
		 *    - W: orthogonal basis matrix for the dimension reduction subspace.
		 *    - FParameters: structure of parameters computed from the sample. It
		 *    contains:
		 *          - FParameters.sigma = array of conditional covariance matrices
		 *          - FParameters.sigmag = marginal covariance matrix
		 *          - FParameters.n: sample size for each value of the response Y.
		 */
		template<typename T>
			T F4core (const matrix<T> &W, const FParameters<T> &FP);
		/**	Derivative of F (minus the log-likelihood) for the CORE model.
		 * Inputs:
		 *    - W: orthogonal basis matrix for the dimension reduction subspace.
		 *    - FParameters: structure of parameters computed from the sample. It
		 *    contains:
		 *          - FParameters.sigma = array of conditional covariance matrices
		 *          - FParameters.sigmag = marginal covariance matrix
		 *          - FParameters.n: sample size for each value of the response Y.
		 */
		template<typename T>
			matrix<T> dF4core (const matrix<T> &W, const FParameters<T> &FP);
	};
};

/*! \brief Fill a matrix with pseudo-random values drawn uniformly from [0,1].
 * \param m matrix to overwrite in place */
template<typename T>
void init (matrix<T> &m){
	const int rows = m.size1();
	const int cols = m.size2();
	for(int r = 0; r < rows; ++r)
		for(int c = 0; c < cols; ++c)
			m(r,c) = rand() / (double)RAND_MAX;
}

/*! \brief Fill a column-major matrix with pseudo-random values in [0,1]. */
template<typename T>
void init (matrix<T,column_major> &m){
	const int rows = m.size1();
	const int cols = m.size2();
	for(int r = 0; r < rows; ++r)
		for(int c = 0; c < cols; ++c)
			m(r,c) = rand() / (double)RAND_MAX;
}

/*! \brief Fill a vector with pseudo-random values drawn uniformly from [0,1]. */
template<typename T>
void init (vector<T> &m){
	const int len = m.size();
	for(int idx = 0; idx < len; ++idx)
		m(idx) = rand() / (double)RAND_MAX;
}

/** Main diagonal of a matrix, truncated to the smaller dimension. */
template<typename T>
vector<T> diag (const matrix<T> &m){
	const int len = min(m.size1(),m.size2());
	vector<T> result (len);
	for(int idx = 0; idx < len; ++idx) result(idx) = m(idx,idx);
	return result;
}

/** Main diagonal of a matrix of arbitrary concrete type matrix_t,
 *  returned as a vector<T> of length min(size1,size2).
 *  Fixed: the old code iterated to max(size1,size2), so m(i,i) read past
 *  the end of the smaller dimension for any non-square input.  For the
 *  square matrices visible in this file (e.g. logdet's Cholesky factor)
 *  the behavior is unchanged. */
template<typename T, typename matrix_t>
	vector<T> diag (const matrix_t &m){
	int k = min(m.size1(),m.size2());
	vector<T> v (k);
	for(int i=0;i<k;i++) v(i) = m(i,i);
	return v;
}

/** Build a dense square matrix whose diagonal is v and whose off-diagonal
 *  entries are all zero. */
template<typename T>
matrix<T> vdiagtomat (const vector<T> &v){
	const int n = v.size();
	matrix<T> result (n,n);
	for(int r = 0; r < n; ++r){
		for(int c = 0; c < n; ++c){
			if(r == c) result(r,c) = v(r);
			else result(r,c) = static_cast<T>(0);
		}
	}
	return result;
}

template<typename T>
T sumsum (const matrix<T> &A){
	T sum = static_cast<T>(0);
	for(int i=0;i<A.size1();i++){
		for(int j=0;j<A.size2();j++){
			sum += A(i,j);
		}
	}
	return sum;
}

/** Build a dense real square matrix from a complex vector: the diagonal
 *  holds the real parts of v, everything else is zero. */
template<typename T>
matrix<T> vdiagtomat (const vector<std::complex<T> > &v){
	const int n = v.size();
	matrix<T> result (n,n);
	for(int r = 0; r < n; ++r){
		for(int c = 0; c < n; ++c){
			result(r,c) = (r == c) ? real(v(r)) : static_cast<T>(0);
		}
	}
	return result;
}

//Comparison predicates for sorting.
template<typename T>
bool sortasc (T lhs,T rhs) { return lhs < rhs; }   //ascending order
template<typename T>
bool sortdesc (T lhs,T rhs) { return rhs < lhs; }  //descending order


/** Project D onto the tangent space of the manifold at Y, under the metric
 *  selected by SGP.metric:
 *   0/1 (unconstrained/euclidean): Hnew = D - Y*sym(Y'D)
 *   2   (canonical):               Hnew = D - Y*(Y'D)'
 *  If the normal component dominates the result by a factor > 1e6, one
 *  extra projection pass is applied to restore tangency numerically. */
template<typename T>
matrix<T> tangent (matrix<T> &Y, const matrix<T> &D){
	matrix<T> vert = prod(matrix<T>(trans(Y)),D);
	matrix<T> verts = (vert+trans(vert))/static_cast<T>(2); //symmetric part of Y'D
	
	matrix<T> Hnew (Y.size1(),Y.size2()); //only for the size
	
	switch(SGP.metric){
	case 0 : {
		Hnew = D - prod(Y,verts);
		break;
	}
	case 1 : {
		//same projection as the unconstrained metric
		Hnew = D - prod(Y,verts);
		break;
	}
	case 2 : {
		Hnew = D - prod(Y,matrix<T>(trans(vert)));
		break;
	}
	default : {
		printf("Invalid metric.\n");
		//exit(1);
	}
	}
	//Re-project once if the projection was badly conditioned.
	if((norm_frobenius(Hnew)>0) && ((norm_frobenius(vert)/norm_frobenius(Hnew))>1.e6)){
		vert = prod(trans(Y),Hnew);
		verts = (vert+trans(vert))/2;
		Hnew = Hnew - prod(Y,verts);
	}
	return Hnew;
}

/** Gradient of the selected objective at Y: the derivative dF is computed
 *  (dFtype 0 = LAD model) and projected onto the tangent space at Y;
 *  the sign flip matches the MATLAB original.
 *  NOTE(review): for an unrecognized dFtype, dFY stays default-constructed
 *  (empty) and is still passed to tangent() — confirm intended behavior. */
template<typename T>
matrix<T> grad (int dFtype, matrix<T> &Y, const FParameters<T> &FP){
	matrix<T> dFY;
	switch(dFtype){
	case 0 : { //lad
		dFY = models::lad::dF4lad<T>(Y,FP);
		break;
	}
	default : {
		printf("dF type incorrect.\n");
		break;
	}
	}
	return -tangent(Y,dFY); //sign convention from the MATLAB code
}

/** Element-wise (Hadamard) product of two equally-sized matrices.
 *  Fixed: on a size mismatch the old code printed a message but then went
 *  on to index B out of range; it now returns an all-zero matrix of A's
 *  shape instead (the mismatch path was undefined behavior before). */
template<typename T>
matrix<T> elemwiseprod(const matrix<T> &A, const matrix<T> &B){
	if((A.size1() != B.size1()) || (A.size2() != B.size2())){
		printf("Invalid matrix sizes.\n");
		//exit(1);
		matrix<T> Z (A.size1(),A.size2());
		for(int i=0;i<(int)Z.size1();i++)
			for(int j=0;j<(int)Z.size2();j++)
				Z(i,j) = static_cast<T>(0);
		return Z;
	}
	matrix<T> C (A.size1(),A.size2());
	for(int i=0;i<A.size1();i++){
		for(int j=0;j<A.size2();j++){
			C(i,j) = A(i,j) * B(i,j);
		}
	}
	return C;
}

/** Return a copy of v with every entry scaled by alpha. */
template<typename T>
vector<T> elemwisemul(const vector<T> &v, T alpha){
	vector<T> scaled = v;
	const int len = scaled.size();
	for(int idx = 0; idx < len; ++idx) scaled(idx) *= alpha;
	return scaled;
}

/** Return a copy of v with every entry divided by alpha; tiny divisors are
 *  clamped to 1e-6, mirroring the matrix overload of elemwisediv below.
 *  Fixed: despite its name this function multiplied (res(i) *= alpha and
 *  res(i) *= 1e-6) — an apparent copy-paste from elemwisemul; it now
 *  divides exactly like the matrix overload. */
template<typename T>
vector<T> elemwisediv(const vector<T> &v, T alpha){
	vector<T> res = v;
	if(alpha < 1.e-6) 
		for(int i=0;i<res.size();i++) res(i) /= 1.e-6;
	else
		for(int i=0;i<res.size();i++) res(i) /= alpha;
	return res;
}

/** Divide every entry of m by alpha in place; divisors below 1e-6 are
 *  clamped to 1e-6 to avoid blow-up. */
template<typename T>
void elemwisediv(matrix<T> &m, T alpha){
	const int rows = m.size1(), cols = m.size2();
	if(alpha < 1.e-6) {
		for(int r = 0; r < rows; ++r)
			for(int c = 0; c < cols; ++c)
				m(r,c) /= 1.e-6;
	}
	else{
		for(int r = 0; r < rows; ++r)
			for(int c = 0; c < cols; ++c)
				m(r,c) /= alpha;
	}
}

/** Inner product of two tangent vectors H1, H2 at the point Y, under the
 *  metric selected by SGP.metric (0 = unconstrained, 1 = euclidean,
 *  2 = canonical).  Returns 0 for an invalid metric.
 *  Fixed: `i` was uninitialized, so the invalid-metric path returned an
 *  indeterminate value. */
template<typename T>
double ip (const matrix<T> &Y, const matrix<T> &H1, const matrix<T> &H2){
	double i = 0.;
	switch(SGP.metric){
	case 0 : {
		//unconstrained metric: Re(sum(conj(H1).*H2))
		i = sumsum<T>(real(elemwiseprod<T>(conj(H1),H2)));
		break;
	}
	case 1 : {
		//euclidean metric (same formula as the unconstrained one)
		i = sumsum<T>(real(elemwiseprod<T>(conj(H1),H2)));
		break;
	}
	case 2 : {
		//canonical metric: subtract the component through Y, then halve
		i = sumsum<T>(real(elemwiseprod<T>(conj(H1),H2))) - 
			sumsum<T>(real(elemwiseprod(matrix<T>(conj(prod(matrix<T>(trans(Y)),H1))),matrix<T>(prod(matrix<T>(trans(Y)),H2)))));
		i /= 2.;
		break;
	}
	default : {
		printf("Invalid metric.\n");
		//exit(1);
	}
	}
	return i;
}

/** Return the smaller of a and b (b on ties, as std::min does not). */
template<typename T> 
const T& min (const T& a, const T& b){
	if(a < b) return a;
	return b;
}

/** Return the larger of a and b (b on ties). */
template<typename T> 
const T& max (const T& a, const T& b){
	if(a > b) return a;
	return b;
}

/** m-by-n matrix of zeros. */
template<typename T>
matrix<T> zeros(int m,int n){
	matrix<T> result (m,n);
	for(int r = 0; r < m; ++r)
		for(int c = 0; c < n; ++c)
			result(r,c) = static_cast<T>(0);
	return result;
}

/** Square m-by-m matrix of zeros. */
template<typename T>
matrix<T> zeros(int m){
	matrix<T> result (m,m);
	for(int r = 0; r < m; ++r)
		for(int c = 0; c < m; ++c)
			result(r,c) = static_cast<T>(0);
	return result;
}

/** Length-m vector of zeros. */
template<typename T>
vector<T> vzeros (int m){
	vector<T> result (m);
	for(int idx = 0; idx < m; ++idx) result(idx) = static_cast<T>(0);
	return result;
}

/** m-by-n identity-like matrix: ones on the main diagonal, zeros elsewhere. */
template<typename T>
matrix<T> eye (int m,int n){
	matrix<T> result = zeros<T>(m,n);
	const int d = min(m,n);
	for(int idx = 0; idx < d; ++idx) result(idx,idx) = static_cast<T>(1);
	return result;
}

/** m-by-m identity matrix. */
template<typename T>
matrix<T> eye (int m){
	matrix<T> result = zeros<T>(m,m);
	for(int idx = 0; idx < m; ++idx) result(idx,idx) = static_cast<T>(1);
	return result;
}

/** Element-wise natural logarithm of v (T selects the scalar type,
 *  vector_t the concrete vector class). */
template<typename T, typename vector_t>
	vector_t elemwiselog (const vector_t &v){
	const int len = v.size();
	vector_t result (v.size());
	for(int idx = 0; idx < len; ++idx) result(idx) = log(v(idx));
	return result;
}

/** log(det(M)) for a symmetric positive-definite matrix via Cholesky:
 *  logdet(M) = 2*sum(log(diag(chol(M)))).
 *  Returns quiet NaN if LAPACK dpotrf_ fails (M not positive definite).
 *  Fixed: INFO is long but was printed with "%d", and the error path fell
 *  off the end of the function without returning a value (UB). */
template<typename T>
T logdet (const matrix<T> &M){
	matrix<T, column_major> Chol = M;
	T *pChol = &Chol(0,0);
	long N = Chol.size1(), LDA = Chol.size2(), info;
	dpotrf_("U",&N,pChol,&LDA,&info); //upper-triangular Cholesky factor
	if(info != 0){ 
		printf("Error %ld in potrf.\n",info);
		//exit(1);
		return std::numeric_limits<T>::quiet_NaN();
	}
	return (static_cast<T>(2)*sum(elemwiselog< T,vector<T> >(diag< T,matrix<T,column_major> >(Chol))));
}

/** Result pair returned by clamp(): nY = clamped/projected point,
 *  nD = clamped direction (only meaningful for nargout == 2). */
template<typename T>
struct rclamp{
	matrix<T> nY, nD;
};

/** Clamp a point Y (and direction D) back toward the manifold, mimicking
 *  the MATLAB original's nargin/nargout dispatch:
 *   - nargin==1: re-orthonormalize Y with the first-order polar correction
 *     Y*(1.5*I - 0.5*Y'Y);
 *   - nargin==2, nargout==1: project D onto the tangent space at Y
 *     (result in nY);
 *   - nargin==2, nargout==2: do both.
 *  Returns {nY,nD}; members marked "unused" just echo an input.
 *  Fixed: any other nargin/nargout combination fell off the end of the
 *  function without a return (UB); it now returns the inputs unchanged. */
template<typename T>
rclamp<T> clamp (matrix<T> &Y, matrix<T> &D, int nargin, int nargout){
	bool clamp_off = false; //debug switch: skip clamping entirely
	if(clamp_off){
		if(nargin == 1){ 
			rclamp<T> rc = {Y,D}; //second member unused
			return rc;
		}
		if((nargin == 2) && (nargout == 1)){ 
			rclamp<T> rc = {D,Y}; //second member unused
			return rc;
		}
		if((nargin == 2) && (nargout == 2)){ 
			rclamp<T> rc = {Y,D};
			return rc;
		}	
	}
	if(nargin == 1){ 
		//Y*(1.5*I - 0.5*Y'Y): first-order correction toward orthonormal columns
		matrix<T> rcnY = prod(Y,matrix<T>(1.5*eye<T>(Y.size2(),Y.size2())-0.5*prod(matrix<T>(trans(Y)),Y)));
		rclamp<T> rc = {rcnY,rcnY}; //second member unused
		return rc;
	}
	if((nargin == 2) && (nargout == 1)){
		matrix<T> vert = prod(trans(Y),D);
		matrix<T> verts = (vert+trans(vert))/2;
		rclamp<T> rc  = {D - prod(Y,verts),D}; //second member unused
		return rc;
	}
	if((nargin == 2) && (nargout == 2)){ 
		matrix<T> aux1 = 0.5*prod(trans(Y),Y);
		matrix<T> aux2 = 1.5*eye<T>(Y.size2());
		matrix<T> rcnY = prod(Y,matrix<T>(aux2-aux1));
		matrix<T> vert = prod(matrix<T>(trans(Y)),D);
		matrix<T> verts = (vert+trans(vert))/static_cast<T>(2);
		matrix<T> rcnD = D - prod(Y,verts);
		rclamp<T> rc = {rcnY,rcnD};
		return rc;
	}
	//Fallback for unexpected nargin/nargout: return the inputs unchanged.
	rclamp<T> rc = {Y,D};
	return rc;
}

/** Recover explicit Q and R factors after a LAPACK dgeqrf_ factorization.
 *  AA holds the original matrix, A the dgeqrf_ output (reflectors + R),
 *  TAU the reflector scalars.  Q must come in identity-initialized and is
 *  overwritten via dormqr_; then R = Q'*AA.  The trailing sign flips and
 *  the 2x2 subrange reproduce the conventions of the MATLAB code this was
 *  ported from.
 *  NOTE(review): subrange(R,0,2,0,2) hard-codes a 2-column problem —
 *  confirm against the callers.
 *  Fixed: INFO is long but was printed with "%d", and the error message
 *  named sormqr_ although dormqr_ is the routine called. */
template<typename T>
void getQR (matrix<T> &AA, matrix<T,column_major> &A, vector<T> &TAU, matrix<T> &Q, matrix<T> &R){

	long M = A.size1(), N = A.size2(), K = std::min(M,N); //'L'
	T *pA = &A(0,0);
	long LDA = max(M,N);
	T *pTAU = &TAU(0);
	T *pC = &Q(0,0);
	long LDC = max(M,N);
	long LWORK = N*N;
	vector<T> WORK (LWORK); T *pWORK = &WORK(0);
	long info;

	//Apply the Householder reflectors (Q' from the left) to the identity in Q.
	dormqr_("L","T",&M,&N,&K,pA,&LDA,pTAU,pC,&LDC,pWORK,&LWORK,&info);

	if(info != 0){
		printf("Error %ld in dormqr_.",info);
		//exit(1);
	}

	R = prod(trans(Q),AA); // R = Q^tA
	//------------------------
	//MATLAB-compatibility adjustments (qr() sign conventions).
	AA = -1*AA;
	R = subrange(R,0,2,0,2);
	R = -1*R;
	//------------------------
}

//http://mate.uprh.edu/~pnm/notas4031/matlab/apendice.htm
//Matrix exponential by plain Taylor-series summation: keep adding terms
//until, in the machine's finite precision, the partial sum stops changing
//even if more terms are added.
template<typename T>
matrix<T> expm (const matrix<T> &A){
	int M = A.size1(), N = A.size2();
	matrix<T> E (M,N), F(M,N);
	E = zeros<T>(M,N); //running partial sum
	//std::cout << E << std::endl;
	F = eye<T>(M,N); //current term A^k/k!
	//std::cout << F << std::endl;
	int k = 1;
	//norm_1(E+F-E) is not algebraically norm_1(F): evaluated in floating
	//point it measures the part of F that still changes E, so the loop
	//terminates exactly when adding F no longer affects the sum.
	while(norm_1(E+F-E) > 0){
		E = E+F;
		F = prod(A,F);
		F = F/static_cast<T>(k);
		k++;
		//std::cout << E << std::endl;
		//std::cout << F << std::endl;
		//getchar();
	}
	//printf("k=%d.\n",k);
	return E; 
}

/** Result pair returned by move(): Yo = moved point, Ho = transported
 *  direction (only meaningful for nargout == 2). */
template<typename T>
struct rmove{
	matrix<T> Yo, Ho;
};

/** Move the point Yi along direction Hi by step t, per SGP.metric and
 *  SGP.motion, returning the moved point Yo and (when nargout == 2) the
 *  transported direction Ho rescaled to preserve its original metric norm.
 *  Port of sg_min's move.m.
 *  NOTE(review): several LAPACK INFO values are printed with "%d" though
 *  they are long; in the exact-geodesic branch `mn` is read before ever
 *  being assigned; and in case 0 the computed Q is never copied into the
 *  returned point — confirm all three against the MATLAB original. */
template<typename T>
rmove<T> move (const matrix<T> &Yi, const matrix<T> &Hi, T t, int nargout = 2){
	int n = Yi.size1(), k = Yi.size2(); 
	matrix<T> rmYo = Yi, rmHo = Hi; //preallocated result copies
	double mag1;
	if(t == 0){
		//zero step: nothing moves
		//rm.Yo = Yi;
		//if(nargout == 2) rm.Ho = Hi;
		rmove<T> rm = {Yi,Hi};
		return rm;
	}
	if(nargout == 2) mag1 = sqrt(ip<T>(Yi,Hi,Hi)); //metric norm of Hi before moving
	switch(SGP.metric){
	case 0 : {
		//Move by straight lines with a qr projection back to the manifold
		if(nargout == 2) mag1 = sqrt(ip(Yi,Hi,Hi));

		matrix<T,column_major> A = Yi+t*Hi; T *pA = &A(0,0);
		matrix<T> AA = A;

		long M = A.size1(), N = A.size2(), LDA = M;
		vector<T> TAU (std::min(M,N)); T *pTAU = &TAU(0);
		long LWORK = N*N; //rough workspace-size estimate
		vector<T> WORK (max(1,(int)LWORK)); T *pWORK = &WORK(0);
		long info;

		dgeqrf_(&M,&N,pA,&LDA,pTAU,pWORK,&LWORK,&info);

		if(info != 0){
			printf("Error %d solving sgeqrf_.",info);
			//exit(1);
		}

		//Split the factorization into explicit Q and R
		identity_matrix<T> I(A.size1(),min(A.size1(),A.size2()));
		matrix<T> Q(I), R(min(A.size1(),A.size2()),A.size2());
		getQR(AA,A,TAU,Q,R);

		//NOTE(review): Q is never assigned to rmYo here, so the point
		//returned for metric 0 is still Yi — confirm against move.m.
		if(nargout == 2) rmHo = Hi;
		break;
	}
	case 1 : {
		if(SGP.motion == 0){
		  //This section computes approximate euclidean geodesics 
			//using polar decomposition, though just clamping is more 
			//efficient and as accurate for short distances.
			if(nargout == 2) mag1 = sqrt(ip(Yi,Hi,Hi));
			
			matrix<T,column_major> A = Yi+t*Hi; T *pA = &A(0,0);
			long M = A.size1(), N = A.size2(), LDA = M; //LDA: leading dimension (column-major storage)
			vector<T> S (min(M,N));	T *pS = &S(0); //singular values
			matrix<T> U (M,M); T *pU = &U(0,0); //NOTE(review): JOBZ="S" expects U as M x min(M,N) — confirm sizing
			long LDU = M;
			matrix<T> VT (N,N); T *pVT = &VT(0,0);
			long LDVT = N;
			vector<T> WORK;
			long LWORK = 3*std::min(M,N) + max(max(M,N),5*std::min(M,N)*std::min(M,N)+4*std::min(M,N));
			WORK.resize(LWORK); T *pWORK = &WORK(0);
			vector<long> IWORK (8*min(M,N)); long *pIWORK = &IWORK(0);
			long info;

			dgesdd_("S",&M,&N,pA,&LDA,pS,pU,&LDU,pVT,&LDVT,pWORK,&LWORK,pIWORK,&info);

			//------------------------------------------------
			//MATLAB compatibility: transpose U into rU
			matrix<T> rU (M,N); 

			for(int i=0;i<N;i++){
				for(int j=0;j<M;j++){
					rU(j,i) = U(i,j);
				}
			}
			//-----------------------------------------------

			//polar factor U*V': closest matrix with orthonormal columns
			rmYo = prod(rU,matrix<T>(trans(VT)));

			if(nargout == 2) rmHo = Hi;
		}
		else{
			//This section computes exact euclidean geodesics
			if(nargout == 2) mag1 = sqrt(ip<T>(Yi,Hi,Hi));
			matrix<T> a = prod(Yi,Hi); //NOTE(review): move.m uses Y'*H here; confirm the missing trans()
			//EXPM(X) is the matrix exponential of X.  EXPM is computed using
			//a scaling and squaring algorithm with a Pade approximation.
			matrix<T,column_major> A = Hi-prod(Yi,a); 
			matrix<T> AA = A; //---------------------------------qr(A,0) economy size
			T *pA = &AA(0,0);

			long M = A.size1(), N = A.size2(), LDA = M;
			vector<T> TAU (std::min(M,N)); T *pTAU = &TAU(0);
			long LWORK = N*N; //rough workspace-size estimate
			vector<T> WORK (LWORK); T *pWORK = &WORK(0);
			long info;

			dgeqrf_(&M,&N,pA,&LDA,pTAU,pWORK,&LWORK,&info);
			
			if(info != 0){
				printf("Error %d solving sgeqrf_.",info);
				//exit(1);
			}

			//Split the factorization into explicit Q and R
			identity_matrix<T> I(A.size1(),A.size2());
			matrix<T> Q = I, R(A.size2(),A.size2());
			getQR<T>(AA,A,TAU,Q,R);
			matrix<T> mn, nm = -t*a;
			nm = expm(nm);
			if(nargout == 2){ 
				//NOTE(review): mn is read here but never assigned above (in
				//move.m it is a block of expm([a,-R';R,0]) ) — confirm.
				Q = prod(Yi,subrange(mn,0,k,k+1,2*k)) + prod(Q,subrange(mn,k+1,2*k,k+1,2*k)); 
				rmHo = prod(Yi,a)+prod(Q,matrix<T>(prod(R,nm)));
			}
		}
		break;
	}
	case 2 : {/*
		if(SGP.motion == 0){
			if(nargout == 2) mag1 = sqrt(ip(Yi,Hi,Hi));
			matrix<T> a = prod(Yi,Hi);
			
			matrix<T,column_major> A = Hi-prod(Yi,a);
			matrix<T> AA = A;
			vector<T> TAU (min(A.size1(),A.size2()));
			lapack::geqrf(A,TAU);
			identity_matrix<T> I(A.size1(),A.size2());
			matrix<T> q = I, r(A.size2(),A.size2());
			getQR(AA,A,TAU,q,r);
			matrix<T> geo;   //---------------------
			matrix<T> mn1 = (eye<T>(2*k)+geo/static_cast<T>(2))/(eye<T>(2*k)-geo/static_cast<T>(2));
			matrix<T> mn = subrange(mn1,0,mn1.size1(),0,k);
			rm.Yo = prod(Yi,subrange(mn,0,k,0,mn.size2())) + prod(q,matrix<T>(subrange(mn,k+1,2*k,0,mn.size2())));
			if(nargout==2) 
				rm.Ho = prod(Hi,matrix<T>(subrange(mn,0,k,0,mn.size2())));// - prod(Yi,prod(trans(r),matrix<T>(subrange(mn,k+1,2*k,0,mn,mn.size2()))));
		}
		else{
			if(nargout==2) mag1 = sqrt(ip(Yi,Hi,Hi));
			matrix<T> a = prod(trans(Yi),Hi);
			matrix<T,column_major> A = Hi-prod(Yi,a);
			matrix<T> AA = A;
			vector<T> TAU (min(A.size1(),A.size2()));
			lapack::geqrf(A,TAU);
			identity_matrix<float> I(A.size1(),A.size2());
			matrix<float> q(I), r(A.size2(),A.size2());
			getQR(AA,A,TAU,q,r);
			matrix<T> geo;
			matrix<T> mn1 = expm<T>(geo), mn = matrix<T>(subrange(mn1,0,mn1.size1(),0,k)); //estos k, son k-1?
			rm.Yo = prod(Yi,matrix<T>(subrange(mn,0,k,0,mn.size2()))) + 
				prod(q,matrix<T>(subrange(mn,k+1,2*k,0,mn.size2())));
			if(nargout == 2){
				rm.Ho = prod(Hi,subrange(mn,0,k,0,mn.size2())) - prod(Yi,prod(trans(r),subrange(mn,k+1,2*k,0,mn.size2())));
			}		
			}*/
		//canonical-metric motion: not ported yet (block above kept commented out)
		break;
	}
	default : {
		printf("Invalid metric.\n");
		//exit(1);
	}
	}	
	if(nargout == 1){ 
		rclamp<T> rc = clamp<T>(rmYo,rmHo,1,1); //argin,argout
		rmYo = rc.nY;
	}
	else if (nargout == 2){
		//re-project, then rescale the direction to preserve its original norm
		rclamp<T> rc = clamp<T>(rmYo,rmHo,2,2);
		double mag2 = sqrt(ip(rc.nY,rc.nD,rc.nD));
		rmYo = rc.nY;
		rmHo = rc.nD * mag1/mag2;
	}
	rmove<T> rm = {rmYo,rmHo};
	return rm;
}

/** Intrinsic dimension of the search space for a starting point Y0,
 *  accounting for the block partition and real/complex flag in SGP. */
template<typename T>
int sgmindimension(const matrix<T> &Y0){
	const int N = Y0.size1(), P = Y0.size2();
	const int np = SGP.partition.size();
	int dim;
	if(!SGP.complex){
		//real case: N*P entries minus the orthonormality constraints
		dim = N*P - P*(P+1)/2;
		for(int i = 0; i < np; ++i){
			const int k = SGP.partition[i].size();
			dim -= k*(k-1)/2;
		}
	}
	else{
		//complex case: twice the entries minus P^2 constraints
		dim = 2*N*P - P*P;
		for(int i = 0; i < np; ++i){
			const int k = SGP.partition[i].size();
			dim -= k*k;
		}
	}
	return dim;
}

/** Differential of the tangent-space projection at Y applied to H, in the
 *  direction dY, under the metric selected by SGP.metric. */
template<typename V>
matrix<V> dtangent(const matrix<V> &Y, const matrix<V> &H, const matrix<V> &dY){
	matrix<V> vert = prod(trans(Y),H), verts = (vert+trans(vert))/2;
	matrix<V> dvert = prod(trans(dY),H), dverts = (dvert+trans(dvert))/2;
	matrix<V> result;
	switch(SGP.metric){
	case 0 :
	case 1 :
		//unconstrained and euclidean metrics share the same formula
		result = -prod(dY,verts)-prod(Y,dverts);
		break;
	case 2 :
		//canonical metric
		result = -prod(dY,trans(vert))-prod(Y,trans(dvert));
		break;
	default :
		printf("Invalid metric.\n");
		//exit(1);
		break;
	}
	return result;
}

/** Unfinished stub (partitioning helper?); always returns 1.
 *  NOTE(review): A is used while default-constructed (empty), and
 *  ublas::vector has no size1()/size2(), so this template cannot be
 *  instantiated as written — kept for reference only. */
template<typename T>
int part (const vector<T> &Y0){
	int N = Y0.size1(), P = Y0.size2();
	//QR
	matrix<T> A; //=prod(trans(Y),dF(Y));
	matrix<T> As = (A+trans(A))/2, Aa = (A-trans(A))/2;
	//M
	return 1;
}

// Removed: a `template<typename T> vector<T> zeros(int m)` was declared
// here, but `template<typename T> matrix<T> zeros(int m)` is already
// declared above and function templates cannot be overloaded on the
// return type alone — together the two declarations are ill-formed
// ("ambiguating new declaration") and the file would not compile.
// The removed function was byte-for-byte identical in behavior to
// vzeros<T>(int) above; use vzeros instead.

/** Christoffel/connection term for tangent vectors H1, H2 at Y, under the
 *  metric selected by SGP.metric.
 *  Fixed: the default (invalid-metric) case printed a message and then
 *  fell off the end of a value-returning function (UB); it now returns a
 *  zero connection, matching the unconstrained case. */
template<typename T>
matrix<T> connection (matrix<T> &Y, matrix<T> &H1, matrix<T> &H2){
	switch(SGP.metric){
	case 0 : {
		//The unconstrained connection
		matrix<T> C = zeros<T>(Y.size1(),Y.size2());
		return C;
	}
	case 1 : {
		//The euclidean connection for the stiefel
		matrix<T> C = prod(Y,matrix<T>(prod(matrix<T>(trans(H1)),H2)+prod(matrix<T>(trans(H2)),H1)))/2.;
		return C;
	}
	case 2 : {
		//The canonical connection for the stiefel
		matrix<T> b = prod(H1,H2)-prod(matrix<T>(prod(H1,Y)),matrix<T>(prod(Y,H2)));
		matrix<T> C = (prod(H1,matrix<T>(prod(H2,Y)))+prod(H2,matrix<T>(prod(H1,Y))))/2+prod(Y,matrix<T>(b+trans(b)))/2;
		return C;
	}
	default : {
		printf("Invalid metric.\n");
		//exit(1);
		return zeros<T>(Y.size1(),Y.size2());
	}
	}
}

/** Squared metric norm of the gradient after moving Y along H by step t;
 *  stype selects the model derivative (0 = LAD).
 *  NOTE(review): move<T>(Y,H,t,FP) passes the FParameters struct where
 *  move()'s `int nargout` parameter is expected, and assigns move()'s
 *  rmove<T> result directly to a matrix — this template cannot be
 *  instantiated as written; confirm the intended call (probably
 *  move(Y,H,t,1).Yo). */
template<typename T>
T gradline(matrix<T> &Y, const matrix<T> &H, T t, FParameters<T> &FP, int stype = 0) {
	Y = move<T>(Y,H,t,FP);
	matrix<T> g = grad<T>(stype,Y,FP);
	return ip<T>(Y,g,g);
}

template<typename T>
T Fline (const matrix<T> &Yi, const matrix<T> &Hi, T t, const FParameters<T> &FP, int ftype = 0){//ftype==0, lad
	rmove<T> rm = move<T>(Yi,Hi,t,1); //Yo,Ho
	T fval = models::lad::F4lad<T>(rm.Yo,FP);
	return fval;
}

template<typename T>
T dFline (const matrix<T> &Yi, const matrix<T> &Hi, T t, const FParameters<T> &FP, int ftype = 0){//ftype==0, lad
	rmove<T> rm = move<T>(Yi,Hi,t); //Yo,Ho
	T dfval = ip<T>(rm.Yo,rm.Ho,matrix<T>(grad<T>(0,rm.Yo,FP)));
	return dfval;
}

/** Result of fzero(): b = abscissa of the (approximate) zero, fval = value
 *  of the function at b. */
template<typename T>
struct rfzero{
	T b, fval;
};

/** Scalar zero finder for the directional derivative dFline along search
 *  direction dr — a port of the bracketing core of MATLAB's fzero
 *  (Dekker/Brent: bisection + secant/inverse-quadratic interpolation).
 *  range must hold two endpoints at which dFline differs in sign.
 *  Returns the located step b and the derivative value there.
 *  NOTE(review): savea/saveb/savefa/savefb, intervaliter and exitflag are
 *  set but never read; the range.size()==1 case is unimplemented, leaving
 *  a, b, fa, fb uninitialized on that path. */
template<typename T>
rfzero<T> fzero (vector<T> range, const matrix<T> &Y, const matrix<T> &dr, const FParameters<T> &FP, int dFtpe = 0){
	int fcount = 0, iter = 0, intervaliter = 0, exitflag = 1;
	T tol = std::numeric_limits<T>::epsilon(); 
	T a,b, savea, saveb, fa, fb, savefa, savefb;
	if(range.size() == 2){
		a = range(0); b = range(1);
		savea = a; saveb = b;
		fa = dFline(Y,dr,a,FP); fb = dFline(Y,dr,b,FP);
		fcount += 2;
		savefa = fa; savefb = fb;
		//either endpoint may already be a zero
		if (fa == static_cast<T>(0)){
			rfzero<T> rfz = {a,fa};
			return rfz;
		}
		else if (fb == static_cast<T>(0)){
			rfzero<T> rfz = {b,fb};
			return rfz;
		}
		else if ((fa > static_cast<T>(0)) == (fb > static_cast<T>(0))){
			printf("MATLAB:fzero:ValuesAtEndPtsSameSign.\nThe function values at the interval endpoints must differ in sign.");
			//exit(1);
		}
	}
	else if (range.size() == 1){
		//TODO: single starting point (interval search) not implemented yet
	}
	else{
		printf("MATLAB:fzero:LengthArg2, Second argument must be of length 1 or 2.");
		//exit(1);
	}
	T fc = fb;
	T c, d, e, m, toler;
	//Main loop, exit from middle of the loop
	while((fb != 0) && (a != b)){
		//Insure that b is the best result so far, a is the previous
    //value of b, and c is on the opposite side of the zero from b.
		if((fb > 0) == (fc > 0)){
			c = a;
			fc = fa;
			d = b-a;
			e = d;
		}
		if((fabs(fc) < fabs(fb))){
			a = b;
			b = c;
			c = a;
			fa = fb;
			fb = fc;
			fc = fa;
		}
		//Convergence test and possible exit
		m = 0.5*(c-b);
		toler = 2.0*tol*std::max(fabs(b),1.0);
		if((fabs(m) <= toler) || fabs(fa) <= fabs(fb)) break;

		//Choose bisection or interpolation
		if((fabs(e) < toler) || (fabs(fa) <= fabs(fb))){
			//bisection step
			d = m;
			e = m;
		}
		else{
			T s = fb/fa, p, q, r;
			if (a == c){
				//linear (secant) interpolation
				p = 2.0*m*s;
				q = 1.0 - s;
			}
			else{
				//inverse quadratic interpolation
				q = fa/fc;
				r = fb/fc;
				p = s*(2.0*m*q*(q-r)-(b-a)*(r-1.0));
				q = (q - 1.0)*(r-1.0)*(s-1.0);
			}
			if(p>0) q = -q;
			else p = -p;
			//Is interpolated point acceptable
			if(((2.0*p < 3.0*m*q - fabs(toler*q)) && (p < fabs(0.5*e*q)))){
				e = d;
				d = p/q;
			}
			else{
				//no: fall back to bisection
				d = m;
				e = m;
			}
		}
		//Next point (never closer to c than toler)
		a = b;
		fa = fb;
		if(fabs(d) > toler) b += d;
		else if(b>c) b -= toler;
		else b += toler;
		fb = dFline(Y,dr,b,FP);
		fcount += 1;
		iter += 1;
	}
	rfzero<T> rfz = {b,fb};
	return rfz;
}

/** Result of fminbnd(): xf = abscissa of the located minimum, fval = value
 *  of the function at xf. */
template<typename T>
struct rfminbnd{
	T xf, fval;
};

/** Scalar minimization of Fline(Y,dr,x,FP) on the interval [ax,bx].
 *
 * Port of MATLAB's fminbnd (Brent's method): golden-section search
 * combined with parabolic interpolation. Returns the minimizer xf and
 * the objective value fval.
 *
 * ax, bx : interval endpoints (ax must not exceed bx)
 * Y, dr  : current point and search direction for the line function
 * FP     : objective-function parameters
 * dFtpe  : unused (kept for interface compatibility)
 */
template<typename T>
rfminbnd<T> fminbnd (T ax, T bx, const matrix<T> &Y, const matrix<T> &dr, const FParameters<T> &FP, int dFtpe = 0){
	//Termination constants.
	T tol = 1.e-4;
	int maxiter = 500, maxfun = 500, funccount = 0;
	//The upper bound must be at least the lower bound.
	if(ax > bx){
		printf("the upper bound must be larger than the lower bound.\n");
		//exit(1);
	}
	//Initial estimate: the golden-section point of [a,b].
	T eps = std::numeric_limits<T>::epsilon(); 
	T seps = sqrt(eps);
	T c = 0.5*(3.0-sqrt(5.0)); //golden ratio complement
	T a = ax, b = bx, v = a+c*(b-a), w = v, xf = v, d = 0., e = 0., x = xf;
	T fx = Fline(Y,dr,x,FP);
	funccount += 1;
	int iter = 0;
	T fv = fx, fw = fx, xm = 0.5*(a+b), tol1 = seps*fabs(xf)+tol/3.0, tol2 = 2.0*tol1;
	T r, p, q, si, fval;
	//Main loop
	while((fabs(xf-xm)) > (tol2 - 0.5*(b-a))){
		int gs = 1;
		//Is a parabolic fit possible?
		if((fabs(e)) > tol1){
			gs = 0;
			r = (xf-w)*(fx-fv);
			q = (xf-v)*(fx-fw);
			p = (xf-v)*q-(xf-w)*r;
			q = 2.0*(q-r);
			if(q > static_cast<T>(0)) p = -p;
			q = fabs(q);
			r = e;
			e = d;

			//Is the parabola acceptable?
			if((fabs(p) < fabs(0.5*q*r)) && (p > q*(a-xf)) && (p < q*(b-xf))){
				//Yes, take the parabolic interpolation step.
				d = p/q;
				x = xf+d;
				//f must not be evaluated too close to ax or bx.
				if(((x-a) < tol2) || ((b-x) < tol2)){
					//MATLAB: si = sign(xm-xf) + ((xm-xf)==0), i.e. +1 on ties.
					//(This file's sign() maps 0 to +1, so the original
					//expression produced 2 on ties instead of 1.)
					si = (xm >= xf) ? static_cast<T>(1) : static_cast<T>(-1);
					d = tol1*si;
				}
			}
			else{
				//Not acceptable, must do a golden-section step.
				gs = 1;
			}
		}
		if(gs){
			//A golden-section step is required.
			if(xf >= xm) e = a-xf;
			else e = b-xf;
			d = c*e;
		}
		//The function must not be evaluated too close to xf
		//(same tie convention as above: +1 when d == 0).
		si = (d >= 0) ? static_cast<T>(1) : static_cast<T>(-1);
		x = xf+si*std::max(fabs(d),tol1);
		T fu = Fline(Y,dr,x,FP);
		funccount += 1;
		iter += 1;

		//Update a, b, v, w, x, xm, tol1, tol2
		if(fu <= fx){
			if(x >= xf) a = xf;
			else b = xf;
			v = w;
			fv = fw;
			w = xf;
			fw = fx;
			xf = x;
			fx = fu;
		}
		else{
			if(x < xf) a = x;
			else b = x;
			if((fu <= fw) || (w == xf)){
				v = w; fv = fw;
				w = x; fw = fu;
			}
			else if ((fu <= fv) || (v == xf) || (v == w)){
				v = x; fv = fu;
			}
		}
		xm = 0.5*(a+b);
		tol1 = seps*fabs(xf)+tol/3.0;
		tol2 = 2.0*tol1;
		//Evaluation/iteration budget exhausted: return the best point found.
		//(The original built the result here but forgot the return.)
		if((funccount >= maxfun) || (iter >= maxiter)){
			fval = fx;
			rfminbnd<T> rfmb = {xf,fval};
			return rfmb;
		}
	}
	fval = fx;
	rfminbnd<T> rfmb = {xf,fval};
	return rfmb;
}

/** Conjugate-gradient solve of (Hess + dl*I) x = -W on the tangent space.
 *
 * Y      : current stiefel point (defines the metric/clamping)
 * W      : right-hand side (negated internally)
 * tol    : absolute residual tolerance (used when nargin >= 3)
 * nargin : MATLAB-style argument count
 * dl     : optional diagonal (Levenberg-Marquardt) shift
 *
 * Returns the clamped solution. Terminates early when the Hessian is
 * found to be indefinite or the iteration budget is exhausted.
 */
template<typename T>
vector<T> invgrad_CG(const vector<T> &Y, const vector<T> &W, double tol, int nargin, int dl = 0) {
	vector<T> x = 0*W, r = -W;
	double rho2 = ip(Y,r,r), oldrho2 = rho2;
	//Squared target residual: absolute (tol given) or relative to ||r||^2.
	double gepr2 = (nargin>=3) ? tol*tol : rho2*SGP.gradtol*SGP.gradtol;
	int posdef = 1, cn = -1, Nmax = SGP.dimension, reset = 1;
	vector<T> d; //search direction; must persist across iterations
	double beta;
	//The original tested (cn > Nmax), which stopped the loop after one pass.
	while((posdef && (rho2 > gepr2) && (cn < Nmax)) || reset){
		cn++;
		if(reset){
			d = -r;
			reset = 0;
		}
		else{
			beta = rho2/oldrho2;
			d = -r+beta*d;
		}
		//Application of the (optionally shifted) hessian.
		vector<T> Ad = (dl == 0) ? dgrad(Y,d) : dl*d+dgrad(Y,d);
		double dAd = ip(Y,d,Ad);
		if(dAd <= 0) posdef = 0; //indefinite curvature detected
		else{
			double dist = rho2/dAd;
			x = x+dist*d;
			r = r+dist*Ad; r = clamp(Y,r);
			//The original declared a fresh local here, so the outer
			//oldrho2 used by beta stayed uninitialized.
			oldrho2 = rho2;
			rho2 = ip(Y,r,r);
		}
	}
	if(cn == 0){
		//No CG progress at all: fall back to the (scaled) right-hand side.
		x = W;
		if(abs(dl)>0) x = x/dl;
	}
	if(SGP.verbose && (posdef == 0)) printf("invdgrad: Hessian not positive definite, CG terminating early.\n");
	if(SGP.verbose && (cn == Nmax)) printf("invdgrad: max iterations reached inverting the hessian by CG");
	//The original fell off the end of the function without returning.
	return clamp(Y,x);
}

//MATLAB ones(P,1): a length-P vector whose entries are all one.
template<typename T>
vector<T> ones (int P){
	vector<T> result (P);
	for(int k=0;k<P;k++){
		result(k) = static_cast<T>(1);
	}
	return result;
}

//MATLAB find: the (0-based) indices of the nonzero entries of v.
template<typename T>
vector<int> find (const vector<T> &v){
	std::vector<int> hits;
	for(int i=0;i<v.size();i++){
		if(v(i) != 0) hits.push_back(i);
	}
	//The original returned the std::vector directly, but the declared
	//return type is the (uBLAS) vector<int>, which has no conversion from
	//std::vector; copy the indices explicitly.
	vector<int> res (hits.size());
	for(int i=0;i<static_cast<int>(hits.size());i++) res(i) = hits[i];
	return res;
}

//Copy of row `row` of m, returned as a dense vector of length m.size2().
template<typename T>
vector<T> getmatrow (const matrix<T> &m,int row){
	const int cols = m.size2();
	vector<T> out (cols);
	for(int j=0;j<cols;j++){
		out(j) = m(row,j);
	}
	return out;
}

//Entry-wise absolute value of M. Uses fabs, so T is assumed to be a
//floating point type (use abs for integral types).
template<typename T>
matrix<T> elemwiseabs (const matrix<T> &M){
	matrix<T> out (M.size1(),M.size2());
	for(int r=0;r<M.size1();r++){
		for(int c=0;c<M.size2();c++){
			out(r,c) = fabs(M(r,c));
		}
	}
	return out;
}

template<typename T>
void partition (const matrix<T> &Y0){
	int N = Y0.size1(), P = Y0.size2();
	matrix<T> Y (N,P), YY = Y; init(Y); T *pY = &YY(0,0);
	long LDA = N;
	vector<T> r (std::min(N,P)); T *pr = &r(0);
	long LWORK = P*P; //A ojo!
	vector<T> WORK (LWORK); T *pWORK = &WORK(0);
	long info;

	dgeqrf_(&N,&P,pY,&LDA,pr,pWORK,&LWORK,&info);
	
	if(info != 0){
		printf("Error %d solving sgeqrf_.",info);
		//exit(1);
	}
	
	//Separacion QR
	identity_matrix<float> I(Y.size1(),min(Y.size1(),Y.size2()));
	matrix<float> Q(I), R(min(Y.size1(),Y.size2()),Y.size2());
	getQR(YY,Y,r,Q,R);

	matrix<T> A = outer_prod(Y,dF(Y));
	matrix<T> As = (A+trans(A))/2;
	matrix<T> Aa = (A-trans(A))/2;
	matrix<bool> M = (abs(Aa)<1e-7*abs(As)); //<---------- mmmmmmmmm (comparacion 1 a 1)
	vector<T> scorebd = ones<T>(P); int np = -1; //matlab 1
	SGP.partition.resize(P); //definicion del mapeo de v<v> a cell (matlab).
	for(int j=0;j<P;j++){
		if(scorebd(j)){
			np++;
			vector<int> aux = find<bool>(getmatrow<bool>(M,j));
			//Lento pero funciona.
			SGP.partition[np].resize(aux.size());
			SGP.partition[np] = aux;
			//scorebd???
		}
	}
	matrix<bool> Mcomp = 0*M; //???
	for(int i=0;i<SGP.partition.size();i++){
		for(int j=0;j<SGP.partition[i].size();j++){
			Mcomp(SGP.partition[i][j],SGP.partition[i][j]) = 1; //??????????
		}
	} 
	bool goodpart = (sumsum(matrix<T>(elemwiseabs(matrix<T>(Mcomp-M)))) == 0);
	if(!goodpart){
#pragma warning(Unable to find consistent partition of F.)
		for(int i=0;i<SGP.partition.size();i++) SGP.partition[i] = i; //part = num2cell(1:P)
	}
}

//Intended to remove the component of H that is "symmetric" with respect to
//the block partition stored in SGP.partition (MATLAB sg_min `nosym`).
//NOTE(review): the update of H is commented out below, so this function
//currently computes `vert` and leaves H unchanged — the port is unfinished.
template<typename T>
void nosym (const matrix<T> &Y, matrix<T> &H){
	matrix<T> vert = prod(trans(Y),H); //Y'*H, the would-be correction term
	for(int j=0;j<SGP.partition.size();j++){
		for(int k=0;k<SGP.partition[j].size();k++){ 
			//H(SGP.partition[j][k]) = H(SGP.partition[j][k]) - Y(SGP.partition[j][k])*vert(SGP.partition[j][k],SGP.partition[j][k]); //????
		}
	}
}

/** ddf = DDF(dF,Y,H)
 *
 * Second directional derivative of F: ddf = d/dx dF(Y+x*H), approximated
 * by a central finite difference of dF along the direction H.
 *
 * Y is expected to satisfy Y'*Y = I; H has the same size as Y, and so
 * does the returned matrix. See the SG_MIN documentation for details.
 */
template<typename T>
matrix<T> ddF (matrix<T> &Y, matrix<T> &H,const FParameters<T> &FP, int dFtype = 0){
	const double ep = 1e-6;          //finite-difference step scale
	T n = norm_frobenius(H);         //normalize the step by ||H||_F
	//Evaluate dF at Y +/- (ep/||H||)*H and form the symmetric difference.
	matrix<T> Yplus = Y+ep*H/n;
	matrix<T> Yminus = Y-ep*H/n;
	matrix<T> dFp = models::lad::dF4lad<T>(Yplus,FP);
	matrix<T> dFm = models::lad::dF4lad<T>(Yminus,FP);
	return (dFp-dFm)/(2.*ep/n);      //dFtype is currently unused
}

/** DGRAD  Applies the geometrically correct Hessian of F at the stiefel
 * point Y to the tangent vector H, returning the resulting tangent
 * vector W.
 *
 * W = DGRAD(Y,H)
 * Y is expected to satisfy Y'*Y = I
 * H is expected to satisfy H = tangent(Y,H0)
 * W will satisfy W = tangent(Y,W0)
 *
 * role: geometrized objective function — the analog of W = A*H where A
 * is the Hessian of F. The LAD objective (models::lad) is hard-wired.
 */
template<typename T>
matrix<T> dgrad (matrix<T> &Y, matrix<T> &H, const FParameters<T> &FP, int dFtype = 0){
	if(SGP.metric == 0){ 
		//Flat metric: project the second derivative onto the tangent space.
		matrix<T> W = tangent(Y,ddF<T>(Y,H,FP));
		nosym(Y,W);
		return W;
	}
	//Non-flat metric: include the connection and dtangent correction terms.
	matrix<T> df = models::lad::dF4lad<T>(Y,FP);
	df = -df;
	matrix<T> g = tangent<T>(Y,df);
	matrix<T> term1 = connection<T>(Y,g,H);
	matrix<T> term2 = dtangent<T>(Y,df,H);
	matrix<T> term3 = tangent<T>(Y,ddF(Y,H,FP));
	matrix<T> W = term1 + term2 + term3;
	W = -W; //sign convention follows the MATLAB code
	nosym(Y,W);
	return W;
}

//MATLAB-style remainder: x - fix(x/y)*y, with the quotient truncated
//toward zero via an int cast. Divisors below 1e-6 are replaced by 1e-6
//when forming the quotient (guard against division by ~0); note that the
//returned value still multiplies the quotient by the original y.
template<typename T>
T rem (T x, T y){
	int q = (y < 1.e-6) ? static_cast<int>(x/1.e-6)
	                    : static_cast<int>(x/y);
	return x-static_cast<T>(q)*y;
}

//Sign of val as an int. NOTE: sign(0) is +1 here, whereas MATLAB's sign
//returns 0 — callers in this file rely on the +1 convention.
template<typename T>
int sign (T val){
	if(val >= 0) return 1;
	return -1;
}

//Reports whether every entry of m is real. The element-by-element check
//against real(m(i,j)) is commented out in this port, so every matrix is
//currently accepted.
template<typename T>
bool isreal(const matrix<T> &m) {
	(void)m; //check disabled
	return true;
}

template<typename T>
bool isreal(const vector<T> &v) {
	/*
		for(int i=0;i<v.size();i++){
		if(v(i) != real(v(i))) return false;
		}
	*/
	return true;
}

#define PI 3.141592653589793

//LU system inversion: dense inverse of A via LU factorization with
//partial pivoting followed by back-substitution against the identity.
template<typename T>
matrix<T> inv (const matrix<T> &A){
	matrix<T> Acp = A, B (A.size1(),A.size2());
	permutation_matrix<std::size_t> pm (A.size1());
	//lu_factorize returns the 1-based index of a zero pivot, or 0 on
	//success; the original ignored it and substituted into garbage.
	if(lu_factorize(Acp,pm) != 0){
		printf("inv: singular matrix, LU factorization failed.\n");
		//exit(1);
	}
	B.assign(identity_matrix<T>(A.size1()));
	lu_substitute(Acp,pm,B);
	return B;
}

//Result of eig: the spectral decomposition X = V*D*inv(V).
template<typename T>
struct reig{
	matrix<T> V, D; //V: eigenvectors (columns); D: diagonal eigenvalue matrix
};

/** [V,D] = eig(X): eigen decomposition of a square matrix via LAPACK dgeev.
 *
 * Returns the eigenvector matrix V and the diagonal eigenvalue matrix D
 * (eigenvalue order reversed to match MATLAB's convention).
 */
template<typename T>
reig<T> eig (const matrix<T> &X){
	if(X.size1() != X.size2()){
		printf("X must be square!\n");
		//exit(1);
	}
	vector< std::complex<T> > w (X.size1()); //main diagonal of D

	long N  = X.size2();
	matrix<T> A = X; T *pA = &A(0,0);
	long LDA = X.size1();
	vector<T> WR (N); T *pWR = &WR(0); //real parts of the eigenvalues
	vector<T> WI (N); T *pWI = &WI(0); //imaginary parts
	//X is square, so all leading dimensions are simply N (the original
	//mixed size_t and long in max(), which does not deduce).
	long LDVL = N;
	matrix<T> VL (LDVL,N); T *pVL = &VL(0,0);
	long LDVR = N;
	matrix<T> VR (LDVR,N); T *pVR = &VR(0,0);
	long LWORK = 4*N;
	vector<T> WORK (LWORK); T *pWORK = &WORK(0);
	long info;

	//LAPACK job arguments are passed by pointer; the original passed
	//char literals by value.
	char jobvl = 'V', jobvr = 'V';
	dgeev_(&jobvl,&jobvr,&N,pA,&LDA,pWR,pWI,pVL,&LDVL,pVR,&LDVR,pWORK,&LWORK,&info);

	if(info != 0){
		printf("Error %ld computing dgeev.",info);
		//exit(1);
	}

	//Assemble the complex eigenvalues. (The original never copied WR/WI
	//into w, so D was always built from default-initialized values.)
	for(long i=0;i<N;i++) w(i) = std::complex<T>(WR(i),WI(i));

	for(int i=0;i<w.size()/2;i++){ //reverse w to match MATLAB's ordering
		std::complex<T> aux = w(i);
		w(i) = w(w.size()-1-i);
		w(w.size()-1-i) = aux;
	}
	matrix<T> D = vdiagtomat(w);
	reig<T> reseigen;
	//NOTE(review): A is stored row-major but dgeev assumes column-major,
	//so LAPACK effectively factors X'. The right-eigenvector output is
	//returned here (the original copied an *uninitialized* local `vr`);
	//confirm whether VL (left vectors of X') is actually the intended
	//basis and whether a transpose is needed.
	reseigen.V = VR; reseigen.D = D;
	return reseigen;
}

//Replaces every entry a of A by 1/max(a, eps), clamping tiny values to
//machine epsilon before inverting to avoid division by ~0.
//NOTE(review): negative entries are also clamped to eps — confirm callers
//only pass nonnegative data (eigenvalues/variances).
//`inline` added: a non-template function defined in a header otherwise
//violates the one-definition rule when included from several TUs.
inline void invert_vals (matrix<float> &A){
	float eps = std::numeric_limits<float>::epsilon();
	for(int i=0;i<A.size1();i++){
		for(int j=0;j<A.size2();j++){
			if(A(i,j) < eps) A(i,j) = eps;
			A(i,j) = static_cast<float>(1)/A(i,j);
		}
	}
}

//Double-precision overload of invert_vals: entry-wise 1/max(a, eps).
//NOTE(review): negative entries are clamped to eps as well — see the
//float overload. `inline` added to avoid ODR violations for this
//header-defined non-template function.
inline void invert_vals (matrix<double> &A){
	double eps = std::numeric_limits<double>::epsilon();
	for(int i=0;i<A.size1();i++){
		for(int j=0;j<A.size2();j++){
			if(A(i,j) < eps) A(i,j) = eps;
			A(i,j) = static_cast<double>(1)/A(i,j);
		}
	}
}

//Vector overload: entry-wise 1/max(a, eps). `inline` added to avoid ODR
//violations for this header-defined non-template function.
inline void invert_vals (vector<float> &A){
	float eps = std::numeric_limits<float>::epsilon();
	for(int i=0;i<A.size();i++){
		if(A(i) < eps) A(i) = eps;
		A(i) = static_cast<float>(1)/A(i);
	}
}

//Double-precision vector overload: entry-wise 1/max(a, eps). `inline`
//added to avoid ODR violations for this header-defined non-template
//function.
inline void invert_vals (vector<double> &A){
	double eps = std::numeric_limits<double>::epsilon();
	for(int i=0;i<A.size();i++){
		if(A(i) < eps) A(i) = eps;
		A(i) = static_cast<double>(1)/A(i);
	}
}

//Entry-wise square root of A.
template<typename T>
matrix<T> elemwisesqrt (const matrix<T> &A){
	matrix<T> out = A;
	for(int r=0;r<out.size1();r++){
		for(int c=0;c<out.size2();c++){
			out(r,c) = sqrt(out(r,c));
		}
	}
	return out;
}

//Entry-wise square root of A (vector overload).
template<typename T>
vector<T> elemwisesqrt (const vector<T> &A){
	vector<T> out = A;
	for(int k=0;k<out.size();k++){
		out(k) = sqrt(out(k));
	}
	return out;
}

/** C = invsqrtm(A,B)
 *
 * Computes the inverse of the square-root of a matrix. With nargin < 2,
 * A itself is eigendecomposed first; otherwise A is assumed to be a
 * matrix of eigenvectors and B the corresponding eigenvalue matrix
 * (i.e. [A,B] = eig(M)). In both cases the result is
 * C = V * diag(1/sqrt(lambda)) * V'.
 */
template<typename T>
matrix<T> invsqrtm (const matrix<T> &A, const matrix<T> &B, int nargin){
	reig<T> reseigen;
	matrix<T> C;
	vector<T> BB;
	if(nargin < 2){
		reseigen = eig<T>(A); //V,D
		BB = diag(reseigen.D);
		invert_vals(BB); //lambda -> 1/lambda (tiny values clamped to eps)
		C = prod(reseigen.V,matrix<T>(prod(vdiagtomat(elemwisesqrt(BB)),trans(reseigen.V))));
	}
	else{
		BB = diag(B);
		//The original skipped this inversion in the two-argument case,
		//returning sqrtm(M) instead of the documented invsqrtm(M).
		invert_vals(BB);
		C = prod(A,matrix<T>(prod(vdiagtomat(elemwisesqrt(BB)),trans(A))));
	}
	return C;
}


//Covariance matrix of the columns of X. The (rows-1) divisor is applied
//first and then rescaled by (rows-1)/rows, so the result is the biased
//(maximum-likelihood) estimator, exactly as in the original MATLAB code.
template<typename T>
matrix<T> get_cov (const matrix<T> &X){
	const int n = X.size1(), p = X.size2();
	//Column means.
	vector<T> mu (p);
	for(int c=0;c<p;c++){
		mu(c) = 0.;
		for(int r=0;r<n;r++) mu(c) = mu(c)+X(r,c);
		mu(c) = mu(c) / static_cast<T>(n);
	}
	//Centered cross products.
	matrix<T> S (p,p);
	for(int a=0;a<p;a++){
		for(int b=0;b<p;b++){
			S(a,b) = 0.;
			for(int r=0;r<n;r++){
				S(a,b) += (X(r,a)-mu(a))*(X(r,b)-mu(b));
			}
			//Unbiased estimate first, then remove the bias correction.
			S(a,b) /= static_cast<T>(n-1);
			S(a,b) = S(a,b) * (static_cast<T>(n-1)/static_cast<T>(n));
		}
	}
	return S;
}

//Result of get_pars: per-population (slice) statistics of the sample.
template<typename T, typename matrix_t>
	struct gparsr{
		std::vector< matrix_t > sigmas; //conditional covariance matrix per population
		matrix<T> means;                //column j holds the predictor means of population j
		vector<int> counts;             //number of observations per population
	};

/** function sigma = get_pars(X,Y,h)
 *
 * Estimates the conditional covariance matrix and mean of the predictors
 * for each of the h populations (slices) in the sample (Y,X), plus the
 * number of observations per population.
 *
 * USAGE:
 *  - outputs (gparsr):
 *     - sigmas: array of conditional covariance matrices (one per slice).
 *     - means: matrix of predictors' means for each population.
 *     - counts: vector with the number of observations per population.
 *  - inputs:
 *     - Y: response vector with values in 1..h.
 *     - X: matrix of predictors.
 *     - h: number of populations or slices.
 */
template<typename T, typename matrix_t>
	gparsr<T,matrix_t> get_pars (vector<T> &Y, const matrix_t &X, int h){
	int ncols = X.size2();

	//Exactly h zero matrices. (The original constructed `sigmas(h)` and
	//then push_back'ed h more copies, returning 2h entries.)
	matrix_t z = zeros<T>(ncols,ncols);
	std::vector< matrix_t > sigmas (h, z);

	matrix<T> means = zeros<T>(ncols,h);

	vector<int> counts (h);
	for(int i=0;i<h;i++) counts(i) = 0;

	//NOTE(review): isinZ currently always returns true, so this branch is
	//dead; if re-enabled it would change h *after* sigmas/counts were sized.
	if(!isinZ(Y)){
		h = Y.size();
		for(int i=0;i<h;i++) Y(i) = i;
	}

	for(int j=1;j<=h;j++){
		//Collect the rows of X whose response equals j: Xj = X(Y==j,:).
		std::vector< vector<T> > Xjaux;
		for(int i=0;i<Y.size();i++){
			if(Y(i) == j){
				vector<T> aux (X.size2());
				for(int k=0;k<X.size2();k++) aux(k) = X(i,k);
				Xjaux.push_back(aux);
			}
		}
		matrix<T> Xj (Xjaux.size(),X.size2());
		for(int i=0;i<Xj.size1();i++){
			for(int k=0;k<Xj.size2();k++){
				Xj(i,k) = Xjaux[i](k);
			}
		}
		counts(j-1) = Xj.size1();

		//Column means of the slice.
		vector<T> vmeans = mean(matrix<T>(trans(Xj)));
		for(int i=0;i<means.size1();i++) means(i,j-1) = vmeans(i);

		//Conditional covariance of the slice.
		sigmas[j-1] = get_cov<T>(Xj);
	}
	gparsr<T,matrix_t> gp = {sigmas,means,counts};
	return gp;
}

/** FParameters = setdatapars(Y,X,h)
 *
 * Collects the basic statistics of the data sample needed by the
 * objective: the marginal covariance of the predictors plus, for each of
 * the h values of Y, the conditional means, covariance matrices and
 * sample sizes.
 *
 * Inputs:
 * - Y: response vector.
 * - X: predictor matrix.
 * - h: number of distinct values of Y (populations for a discrete
 *   response, slices for a discretized continuous one).
 */
template<typename T, typename matrix_t>
	FParameters<T> setdatapars (vector<T> &Y, const matrix_t &X, int h){
	FParameters<T> FP;
	//Marginal covariance of the predictors.
	FP.sigmag = get_cov<T>(X);
	//Per-population conditional statistics.
	gparsr<T,matrix_t> stats = get_pars<T,matrix_t>(Y,X,h);
	FP.sigma = stats.sigmas;
	FP.means = stats.means;
	FP.n = stats.counts;
	return FP;
}

//Result of indxsort: `values` holds the sorted entries; `indxs` records,
//for each output slot, the position the chosen element occupied in the
//partially-sorted working copy (NOT the index in the original vector —
//see the note in indxsort).
//NOTE(review): indices are stored in a vector<T>; vector<int> would be the
//natural type, but callers may depend on T here.
template<typename T>
struct rindxsort{
	vector<T> values, indxs;
};

/** Selection sort of v (ascending when asc, else descending), returning
 * both the sorted values and, for each slot, the position the selected
 * element came from in the working copy.
 *
 * Fixes over the original:
 *  - `indx` was initialized once outside the loop, so when a pass found
 *    no better element the stale index from the previous pass corrupted
 *    the swap;
 *  - the loop ran to size()-1, leaving the last entry of values/indxs
 *    unwritten (and `size()-1` underflowed for an empty vector).
 *
 * NOTE(review): indxs refers to positions in the mutating working copy,
 * not to indices in the original v (the call site at firsteigs already
 * flags "indxs != matlab").
 */
template<typename T>
rindxsort<T> indxsort (const vector<T> &v, bool asc=true){
	vector<T> vaux = v;
	rindxsort<T> ris;
	const int n = static_cast<int>(vaux.size());
	ris.values.resize(n);
	ris.indxs.resize(n);
	for(int i=0;i<n;i++){
		int indx = i;       //reset every pass (stale in the original)
		T val = vaux(i);
		for(int j=i+1;j<n;j++){
			//Single comparator handles both sort directions.
			bool better = asc ? (vaux(j) < val) : (vaux(j) > val);
			if(better){
				indx = j;
				val = vaux(j);
			}
		}
		//Record the result for this slot.
		ris.values(i) = val;
		ris.indxs(i) = indx;
		//Swap the selected element into place.
		T aux = vaux(i);
		vaux(i) = val;
		vaux(indx) = aux;
	}
	return ris;
}

//Result of firsteigs: the leading k eigenvectors and eigenvalues.
template<typename T>
struct rfirsteigs{
	matrix<T> Vk;      //first k eigenvector columns
	vector<T> diagkD;  //corresponding k eigenvalues
};

/** [Vk,diagkD] = firsteigs(A,k)
 *
 * This function gives the first k eigenvalues of matrix A along with the 
 * corresponding eigenvectors. Eigenvectors are stored in Vk, while eigenvalues 
 * are stored in diagkD.
 *
 * NOTE(review): the eigenvalues are sorted (descending) but the columns of
 * V are NOT reordered accordingly — Vk is simply the first k columns of
 * eig's V, so it may not correspond to diagkD. Fixing this requires a
 * trustworthy indxs from indxsort; verify before relying on Vk.
 */
template<typename T>
rfirsteigs<T> firsteigs (const matrix<T> &A, int k, int nargin){
	if(nargin < 2) k = A.size2(); //MATLAB size(A,2): default to all columns
	reig<T> reseig = eig(A); //V != matlab, D = matlab
	rindxsort<T> ris = indxsort(diag(reseig.D),false); //descending; values = matlab, indxs != matlab
	vector<T> aux (k); 
	copy(ris.values.begin(),ris.values.begin()+k,aux.begin());
	matrix<T> Vk = subrange(reseig.V,0,A.size1(),0,k);
	rfirsteigs<T> rfe = {Vk,aux};
	return rfe;
}

/** out = isinZ(a);
 * 
 * This function verifies that a is an integer-valued array;
 */
template<typename T>
bool isinZ (const vector<T> &a){
	/*
	if(isreal(a)){
		double eps = pow(2,-22);
		int N = a.size(), count = 0;
		for(int k=0;k<N;k++){
			T aux = fabs(fmod(a(k),static_cast<T>(2)));
			if((aux < eps) || (fabs(aux-static_cast<T>(1)) < eps)) count++;
		}
		if(count == N) return true;
		else return false;
	}
	else
		printf("Invalid argument for isinZ function.\n");
	*/
	return true;
}

//Smallest entry of v. Precondition: v is non-empty (v(0) is read
//unconditionally).
template<typename T>
T min (const vector<T> &v){
	T best = v(0);
	for(int k=1;k<v.size();k++){
		if(v(k) < best) best = v(k);
	}
	return best;
}

//Largest entry of v. Precondition: v is non-empty (v(0) is read
//unconditionally).
template<typename T>
T max (const vector<T> &v){
	T best = v(0);
	for(int k=1;k<v.size();k++){
		if(v(k) > best) best = v(k);
	}
	return best;
}

/** [newY] = mapdata(Y)
 * 
 * Offsets the (discrete) response vector Y so that its smallest value
 * becomes 1.
 */
template<typename T>
vector<T> mapdata (const vector<T> &Y){
	if(!isinZ(Y)){ 
		printf("This function is valid for discrete Y only.\n");
		//exit(1);
	}
	vector<T> newY = Y;
	T minY = min<T>(Y);
	//Shift by (1 - minY). The original added fabs(minY)+1, which is the
	//same for minY <= 0 but fails to start the range at 1 when minY > 0.
	for(int i=0;i<Y.size();i++){
		newY(i) += static_cast<T>(1) - minY;
	}
	return newY;
}

namespace solvers{
	namespace prcg{
		//Polak-Ribiere conjugate-gradient minimization of F over the stiefel
		//manifold (MATLAB sg_min `sg_prcg`). Alternates Fline/gradline line
		//searches depending on which convergence criterion is still unmet.
		//Objective hard-wired to models::lad.
		template<typename T>
			sgprcgr<T> sg_prcg (matrix<T> &Y,const FParameters<T> &FP){
			matrix<T> g = grad<T>(0,Y,FP); //0-lad
			//Gradient magnitude and the two (relative) stopping tolerances.
			double mag = sqrt(ip<T>(Y,g,g)), geps = mag*SGP.gradtol,feps = SGP.ftol;
			T f = models::lad::F4lad<T>(Y,FP);
			int N = 0;
			T oldf = 2.*f, oldmag = mag;
			if(SGP.verbose){
				printf("iter \t grad \t F(Y) \n");
				printf("%d \t %f \t %f \n",N,mag,f);		
			}
			bool reset = 1;
			T oldrho, rho, alpha, gdr, drHdr, cga, cgb, newf, maga, magb, fa, fb;
			matrix<T> olddr (Y.size1(),Y.size2());
			//Iterate until both gradient and objective criteria hold.
			while(((mag>geps) || (fabs(oldf/f-1.)>feps) || reset) && (N <SGP.maxiter)){
				N++;
				bool gradsat = (mag <= geps), fsat = (fabs(oldf/f-1.)<=feps);
				//if (sat) fun = 'gradline'; else fun = 'Fline'
				rho = ip(Y,g,g);
				matrix<T> dr = -g;
				if(!reset){
					//Conjugate direction update (Polak-Ribiere style beta).
					alpha = -rho/oldrho;
					dr = dr - alpha*olddr;
				}
				else reset = 0;
				//Orient dr downhill and take |<dr,-g>|.
				gdr = ip<T>(Y,dr,-g); dr = dr*sign(gdr); gdr = abs(gdr);
				matrix<T> Hessdr = dgrad<T>(Y,dr,FP,0); 
				drHdr = ip<T>(Y,dr,Hessdr); 
				//Newton-like initial step length along dr.
				cga = fabs(gdr/drHdr);
				if(fsat){
					//Bracket a sign change of dF along dr, then root-find.
					while((dFline<T>(Y,dr,0,FP) > 0) == (dFline<T>(Y,dr,2.*cga,FP) > 0)){
						cga = static_cast<T>(2)*cga;
					}
					vector<T> range(2); range(0) = 0; range(1) = 2.0*cga;
					rfzero<T> rfz = fzero<T>(range,Y,dr,FP);
					//NOTE(review): `fval` looks like the function value at
					//the root; the step length should presumably be the
					//root location itself — verify rfzero's field order.
					cgb = rfz.fval;
				}
				else{
					if(cga < 1.e-6) break; //step too small to be useful
					rfminbnd<T> rfmb = fminbnd<T>(-cga,cga,Y,dr,FP);
					cgb = rfmb.xf;
				}
				//Move along the geodesic with both candidate steps and keep
				//the better one.
				rmove<T> rma = move<T>(Y,dr,cga), rmb = move<T>(Y,dr,cgb);
				if(fsat){
					//Gradient-based selection.
					matrix<T> ga = grad<T>(0,rma.Yo,FP), gb = grad<T>(0,rmb.Yo,FP);
					maga = sqrt(ip<T>(rma.Yo,ga,ga));
					magb = sqrt(ip<T>(rmb.Yo,gb,gb));
					
					if(maga<magb){
						mag = maga; Y = rma.Yo; g = ga; olddr = rma.Ho;
					}
					else{
						mag = magb; Y = rmb.Yo; g = gb; olddr = rmb.Ho;
					}
					newf = models::lad::F4lad<T>(Y,FP);
				}
				else{
					//Objective-based selection.
					fa = models::lad::F4lad<T>(rma.Yo,FP);
					fb = models::lad::F4lad<T>(rmb.Yo,FP); //lad!!!!!!!!!!!!!
					if(fa<fb){
						newf = fa; Y = rma.Yo; olddr = rma.Ho;
					}
					else{
						newf = fb; Y = rmb.Yo; olddr = rmb.Ho;
					}
					g = grad<T>(0,Y,FP); mag = sqrt(ip<T>(Y,g,g));
				}
				oldf = f; f = newf, oldrho = rho;
				if(SGP.verbose) printf("%d \t %f \t %f \n",N,mag,f);		
				//Restart the conjugate directions every `dimension` steps.
				if(rem(N,SGP.dimension) == 0.) reset = 1;
			}
			solvers::prcg::sgprcgr<T> r = {f,Y}; 
			return r;
		}
	};
	namespace invdgradcg{
		/** Conjugate-gradient solve of (Hess + dl*I) x = -W on the tangent
		 * space at Y (MATLAB sg_min `invdgrad_cg`).
		 *
		 * tol    : absolute residual tolerance (used when nargin >= 3)
		 * dl     : Levenberg-Marquardt diagonal shift (0 when nargin < 4)
		 * nargin : MATLAB-style argument count
		 *
		 * Returns the clamped solution; stops early on indefinite
		 * curvature or when the iteration budget is spent.
		 */
		template<typename T>
			vector<T> invdgrad_cg(const vector<T> &Y, const vector<T> &W, double tol, double dl,int nargin) {
			if(nargin < 4) dl = 0;
			vector<T> x = 0*W, r = -W;
			double rho2 = ip(Y,r,r), oldrho2 = rho2;
			//Squared target residual: absolute or relative to ||r||^2.
			double gepr2 = (nargin >= 3) ? tol*tol : rho2*SGP.gradtol*SGP.gradtol;
			int posdef = 1, cn = -1, Nmax = SGP.dimension, reset = 1;
			//The search direction must persist across iterations; the
			//original redeclared it inside the loop, discarding it each pass.
			vector<T> d;
			while((posdef && (rho2 > gepr2) && (cn < Nmax)) || reset){
				cn++;
				if(reset){
					d = -r;
					reset = 0;
				}
				else{
					//The original assigned `oldrho` instead of oldrho2 below,
					//leaving this divisor uninitialized.
					double beta = rho2/oldrho2;
					d = -r+beta*d;
				}
				//Apply the (optionally shifted) hessian.
				vector<T> Ad = (dl==0) ? dgrad(Y,d) : dl*d+dgrad(Y,d);
				double dAd = ip(Y,d,Ad), dist;
				if(dAd <= 0) posdef = 0; //indefinite curvature detected
				else{
					dist = rho2/dAd;
					x = x+dist*d;
					r = r+dist*Ad;
					r = clamp(Y,r);
					oldrho2 = rho2;
					rho2 = ip(Y,r,r);
				}
			}
			if(cn==0){
				//No progress at all: fall back to the (scaled) right-hand side.
				x = W;
				if(abs(dl) > 0) x = x/dl;
			}
			if(SGP.verbose && (posdef == 0)) printf("invdgrad: Hessian not positive definite, CG terminating early");
			//The original repeated the posdef test here instead of the
			//iteration-budget test.
			if(SGP.verbose && (cn == Nmax)) printf("invdgrad: max iterations reached inverting the hessian by CG");
			vector<T> H = clamp(Y,x);
			return H;
		}
	};
	namespace newton{
		//Newton minimization of F over the stiefel manifold (MATLAB sg_min
		//`sg_newton`). Each iteration compares a steepest-descent candidate
		//with a Newton candidate and keeps the better point.
		//NOTE(review): this port is unfinished — the invdgrad call is
		//commented out so `ndr` is used uninitialized, `newf`/`steptype`
		//are not set on the non-fsat path, and nb ignores the Hessian term.
		template<typename T>
			rnewton<T> sg_newton (matrix<T> &Y, const FParameters<T> &FP){
			matrix<T> g = grad(0,Y,FP);
			double mag = sqrt(ip<T>(Y,g,g));
			//Relative stopping tolerances for gradient and objective.
			T geps = mag*SGP.gradtol;
			T f = models::lad::F4lad<T>(Y,FP); //-----------------------------------------
			T feps = SGP.ftol;
			int N=0; T oldf = 2.*f, oldmag = mag;
			if(SGP.verbose){ 
				printf("%s\t%s\t%s\t%s","iter","grad","F(Y)","none");
				printf("%d\t%e\t%e\t%s",N,mag,f,"none");
			}	
			T newf;
			while((mag>geps)||(fabs((oldf/f)-1.)>feps)){
				N = N+1;
				bool gradsat = (mag <= geps);
				bool fsat = (fabs((oldf/f)-1.) <= feps);
				//Steepest-descent direction and its two candidate step sizes.
				matrix<T> sdr = -g;
				double gsdr = ip<T>(Y,sdr,g);
				T sa = -f/gsdr;
				if(fsat){//gradline
					//sa act (line-search refinement not yet ported)
				}
				else{//Fline
					//sa act
				}
				double sdrHsdr = ip<T>(Y,sdr,dgrad(Y,sdr,FP));
				T sb = -gsdr/fabs(sdrHsdr);
				if(fsat){//gradline
					//sb act
				}
				else{//Fline
					//sb act
				}
				rmove<T> Ysa = move(Y,sdr,sa), Ysb = move(Y,sdr,sb);
				//note: the MINRES algorithm is more reliable of stiff problems than the CG algorithm.
				//NOTE(review): the inverse-Hessian solve is commented out, so
				//ndr below is an uninitialized (empty) matrix.
				matrix<T> ndr ;//= solvers::invdgradcg::invdgrad_cg<T>(Y,-g,SGP.gradtol*oldmag,oldmag,3); //---------------4th arg unused, nargin is 3!
				T gndr = ip<T>(Y,ndr,g);
				T na = -f/gndr;
				if(fsat){//gradline
					//na act
				}
				else{//Fline
					//na act
				}
				//NOTE(review): should be -gndr/fabs(ndrHndr); placeholder.
				T nb = 1.;//-gndr/fabs(ndrHndr); //-------------------------------------------------
				if(fsat){//gradline
					//nb act
				}
				else{//Fline
					//nb act
				}
				bool steptype; //true: steepest, false: Newton
				rmove<T> Yna = move(Y,ndr,na), Ynb = move(Y,ndr,nb);
				if(fsat){
					//Compare the four candidates by gradient magnitude.
					matrix<T> gsa = grad<T>(0,Ysa.Yo,FP); 
          matrix<T>	gsb = grad<T>(0,Ysb.Yo,FP);  
					matrix<T> gna = grad<T>(0,Yna.Yo,FP);
					matrix<T> gnb = grad<T>(0,Ynb.Yo,FP);
					T magsa = sqrt(ip(Ysa.Yo,gsa,gsa)); 
					T magsb = sqrt(ip(Ysb.Yo,gsb,gsb)); 
					T magna = sqrt(ip(Yna.Yo,gna,gna));
					T magnb = sqrt(ip(Ynb.Yo,gnb,gnb));
					if(min(magsa,magsb)<min(magna,magnb)){
						if(SGP.verbose) steptype = true;
						if(magsa<magsb){
							mag = magsa; Y = Ysa.Yo; g = gsa;
						}
						else{
							mag = magsb; Y = Ysb.Yo; g = gsb;
						}
					}
					else{
						if(SGP.verbose) steptype = false;
						//NOTE(review): this branch selects the Newton points
						//but still compares magsa<magsb — presumably it
						//should compare magna<magnb.
						if(magsa<magsb){
							mag = magna; Y = Yna.Yo; g = gna;
						}
						else{
							mag = magnb; Y = Ynb.Yo; g = gnb;
						}
					}
					newf = models::lad::F4lad(Y,FP); //--------------------------------
				}
				else{
					//Objective-based selection not yet ported: newf is left
					//unset on this path.
				}
				oldf = f; f = newf;
				mag = sqrt(ip(Y,g,g));
				if(SGP.verbose){ 
					if(steptype) printf("%d\t%e\t%e\t%s",N,mag,f,"steepest step");
					else printf("%d\t%e\t%e\t%s",N,mag,f,"newton step");
				}
			}
			rnewton<T> rn;
			rn.fn = f; rn.Yn = Y;
			return rn;
		}
	};
	namespace frcg{
		/** Fletcher-Reeves conjugate-gradient minimization of F over the
		 * stiefel manifold (MATLAB sg_min `sg_frcg`). The conjugate
		 * directions are restarted every SGP.dimension iterations.
		 * NOTE(review): the fzero/fminbnd line searches are not yet ported;
		 * both candidate step lengths default to 1.
		 */
		template<typename T>
			rfrcg<T> sg_frcg (matrix<T> &Y, const FParameters<T> &FP){
			rfrcg<T> rf;
			matrix<T> g = grad<T>(0,Y,FP);
			T mag = sqrt(ip(Y,g,g));
			T geps = mag*SGP.gradtol;
			T f = models::lad::F4lad<T>(Y,FP);
			T feps = SGP.ftol, oldf = .0;
			matrix<T> olddr; 
			int N = 0;
			if(SGP.verbose){
				//The original format string had four %s but only three
				//arguments — undefined behavior.
				printf("%s\t%s\t%s","iter","grad","F(Y)");
				printf("%d\t%e\t%e",N,mag,f);
			}
			int reset = 1;
			while((fabs((oldf/f)-1.)>feps)||reset){
				N = N+1;
				bool gradsat = (mag<=geps), fsat = (fabs((oldf/f)-1.)<=feps);
				matrix<T> dr = -g;
				if(!reset){
					//Hessian-conjugate direction update.
					matrix<T> Hessolddr = dgrad<T>(Y,olddr,FP);
					T alpha = ip(Y,dr,Hessolddr)/ip(Y,olddr,Hessolddr);
					dr = dr-alpha*olddr;
				}
				else reset=0;
				//Orient dr downhill.
				double gdr = ip<T>(Y,dr,-g); dr = dr*sign(gdr); gdr = fabs(gdr);
				matrix<T> Hessdr = dgrad(Y,dr,FP); 
				T drHdr = ip<T>(Y,dr,Hessdr);
				T cga = fabs(gdr/drHdr), cgb;
				if(fsat) cgb = 1.;//fzero();     //line search pending port
				else cgb = 1.;//fminbnd
				rmove<T> rm1 = move(Y,dr,cga), rm2 = move(Y,dr,cgb); //Yo,Ho
				T newf;
				if(fsat){
					//Gradient-based selection of the better candidate.
					matrix<T> ga = grad(0,rm1.Yo,FP), gb = grad(0,rm2.Yo,FP);
					T maga = sqrt(ip(rm1.Yo,ga,ga)), magb = sqrt(ip(rm2.Yo,gb,gb));
					if(maga<magb){
						mag = maga; Y = rm1.Yo; g = ga; olddr = rm1.Ho;
					}
					else{
						mag = magb; Y = rm2.Yo; g = gb; olddr = rm2.Ho;
					}
					newf = models::lad::F4lad<T>(Y,FP);
				}
				else{
					//Objective-based selection.
					T fa = models::lad::F4lad<T>(rm1.Yo,FP), fb = models::lad::F4lad<T>(rm2.Yo,FP);
					if(fa<fb){
						newf = fa; Y = rm1.Yo; olddr = rm1.Ho;
					}
					else{
						//The original stored rm1.Ho here, dropping the step
						//that was actually taken (rm2).
						newf = fb; Y = rm2.Yo; olddr = rm2.Ho;
					}
					g = grad<T>(0,Y,FP); mag = sqrt(ip<T>(Y,g,g));
				}
				oldf = f; f = newf;
				if(SGP.verbose){
					printf("%d\t%e\t%e",N,mag,f);
				}
				if(N%SGP.dimension == 0) reset=1;
			}
			rf.fn = f;
			rf.Yn = Y;
			return rf;
		}
	};
	namespace invdgradMINRES{
		/** MINRES solve of (Hess + dl*I) x = W on the tangent space at Y
		 * (MATLAB sg_min `invdgrad_MINRES`); more robust than CG on stiff
		 * problems. Returns the clamped solution.
		 *
		 * tol    : residual tolerance (used when nargin >= 3)
		 * dl     : diagonal shift (0 when nargin < 4)
		 * nargin : MATLAB-style argument count
		 *
		 * NOTE(review): W is a matrix but the iteration vectors are
		 * declared as vectors, mirroring the original code — confirm the
		 * intended types (the MATLAB source treats tangent vectors as
		 * matrices). Also confirm whether the loop guard should compare
		 * rho against gepr rather than SGP.gradtol.
		 */
		template<typename T>
			matrix<T> invdgrad_MINRES (matrix<T> &Y, matrix<T> &W, double tol, double dl, int nargin){
			if(nargin < 4) dl = 0;
			vector<T> x = 0*W, r = W;
			double rho = sqrt(ip(Y,r,r)), rho_old = rho;
			vector<T> v = r/rho; //first Lanczos vector
			double gepr = (nargin>=3) ? tol : rho*SGP.gradtol;
			double beta = 0, beta_t = 0, c = -1, s = 0;
			vector<T> v_old = 0*v, w = 0*v, www = v;
			int cn = -1, Nmax = 2*SGP.dimension, posdef = 1, reset = 1;
			while(((rho>SGP.gradtol) && (cn<Nmax) && posdef && (rho <= rho_old*(1+gepr))) || reset){
				cn++; reset = 0;
				//Lanczos step with the (optionally shifted) hessian.
				vector<T> wv = (dl == 0) ? dgrad(Y,v)-beta*v_old : dl*v+dgrad(Y,v)-beta*v_old;
				double alpha = ip(Y,v,wv);
				wv = wv-alpha*v;
				if(alpha<=0) posdef = 0; //indefinite curvature detected
				else{
					beta = sqrt(ip(Y,wv,wv)); v_old = v; v = wv/beta;
					//Givens-rotation update of the tridiagonal factorization.
					double l1 = s*alpha-c*beta_t, l2 = s*beta;
					double alpha_t = -s*beta_t-c*alpha;
					//The original wrote `double ... , beta_t = c*beta;`,
					//declaring a new beta_t that shadowed the loop-carried
					//one, so it was never actually updated.
					beta_t = c*beta;
					double l0 = sqrt(alpha_t*alpha_t+beta*beta);
					c = alpha_t/l0; s = beta/l0;
					vector<T> ww = www-l1*w; ww = clamp(Y,ww); //cancellation line
					www = v - l2*w; w = ww/l0;
					x = x+rho*c*w; rho_old = rho; rho = s*rho;
				}
			}
			if(cn == 0){
				//No progress: fall back to the (scaled) right-hand side.
				x = W;
				if(abs(dl)>0) x = x/dl;
			}
			if(SGP.verbose && !posdef) printf("invdgrad: Hessian not positive definite, MINRES terminating early");
			if(SGP.verbose && cn==Nmax) printf("invdgrad: max iterations reached inverting the hessian by MINRES");
			if(SGP.verbose && (rho>rho_old*(1+gepr))) printf("invdgrad: residual increase detected in MINRES, terminating early");
			matrix<T> H = clamp(Y,x);
			return H;
		}
	};
	namespace dog{
		//Trust-region ("dog-leg") minimization of F over the stiefel manifold
		//(MATLAB sg_min `sg_dog`). delta is the trust radius, grown or shrunk
		//according to how well the quadratic model predicted the actual
		//objective change.
		//NOTE(review): this port is unfinished — `f` is never initialized
		//(the F(Y) call is commented out), the invdgrad_MINRES call is
		//commented out so `dr` is empty, and `drHdr` is used uninitialized.
		template<typename T>
			sgDogr<T> sg_DOG (matrix<T>& Y,const FParameters<T> &FP){
			matrix<T> g = grad(0,Y,FP);
			double mag = sqrt(ip(Y,g,g)), geps = mag*SGP.gradtol;
			T f;// = F(y);  -- NOTE(review): left uninitialized in this port
			double feps = SGP.ftol;
			int N = 0;
			T oldf = 2*f;
			double oldmag = mag;
			if(SGP.verbose){
				printf("iter \t grad \t F(Y) \t step type \n");
				printf("%d \t %f \t %f \t none \n",N,mag,f);
			}
			double eps = std::numeric_limits<double>::epsilon();
			//Trust-radius bounds, scaled by the initial gradient magnitude.
			double delta = mag, delta_max = mag/sqrt(eps), delta_min = mag*sqrt(eps); //eps = 2.2204e-16 matlab
			while((mag>geps) || (abs(oldf/f-1)>feps)){
				N++;
				bool gradsat = (mag <= geps);
				bool fsat = (abs(oldf/f-1)<=feps);
				//fun for fminbnd (line search pending port)
				double go = SGP.gradtol*oldmag;
				//NOTE(review): the solve is commented out, so dr is empty.
				matrix<T> dr ;//= solvers::invdgradMINRES::invdgrad_MINRES<T>(Y,-g,go,delta,4);
				double gdr = ip(Y,dr,g), drHdr/* = ip(Y,dr,dgrad(Y,dr))*/, a = -gdr/drHdr;
				//a = fminbnd(); --------------------------------------
				rmove<T> Ya = move<T>(Y,dr,a);
				double b = -f/gdr; //second candidate step length
				//b fminbnd();
				rmove<T> Yb = move<T>(Y,dr,b);
				double newf, t;
				if(fsat){
					//Gradient-based selection between the two candidates.
					matrix<T> ga = grad<T>(0,Ya.Yo,FP), gb = grad<T>(0,Yb.Yo,FP); //lad objective
					double maga = sqrt(ip(Ya.Yo,ga,ga)), magb = sqrt(ip(Yb.Yo,gb,gb));
					if(maga<magb){
						mag = maga; Y = Ya.Yo; g = ga; t = a;
					}
					else{
						mag = magb; Y = Yb.Yo; g = gb; t = b;
					}
					newf = models::lad::F4lad<T>(Y,FP);
				}
				else{
					//Objective-based selection.
					T fa = models::lad::F4lad<T>(Ya.Yo,FP), fb = models::lad::F4lad<T>(Yb.Yo,FP); //lad objective
					if(fa<fb){
						newf = fa; Y = Ya.Yo; t = a;
					}
					else{
						newf = fb; Y = Yb.Yo; t = b;
					}
					g = grad<T>(0,Y,FP); mag = sqrt(ip(Y,g,g));
				}
				//Quadratic-model prediction of F at the chosen step t; the
				//ratio below measures how trustworthy the model was.
				double pref = f+t*gdr+t*t*drHdr/2.;
				int steptype = 0; //stay
				double rat = (pref-f)/(newf-f);
				if((0.66<rat) && (rat<1.5)){
					delta /= 3.; steptype = 1; //good dog: shrink the radius
				}
				else{
					delta *= 4; steptype = 2; //bad dog: grow the radius
				}
				delta = min(delta_max,max(delta,delta_min));
				oldf = f; f = newf;
				mag = sqrt(ip(Y,g,g));
				if(SGP.verbose){
					switch(steptype){
					case 0 : {
						printf("%d \t %f \t %f \t stay \n",N,mag,f);
						break;
					}
					case 1 : {
						printf("%d \t %f \t %f \t good dog \n",N,mag,f);
						break;
					}
					case 2 : {
						printf("%d \t %f \t %f \t bad dog \n",N,mag,f);
						break;
					}
					}
				}
			}
			sgDogr<T> sgd;
			sgd.fn = f;
			sgd.Yn = Y;
			return sgd;
		}
	};
};
//----------------------------------------------------------------------------------------------------
//Result of sg_min: the minimized objective value and the minimizer.
template<typename T>
struct rsgmin{
	T fn;         //objective value F(Yn)
	matrix<T> Yn; //stiefel point attaining it
};

/** C++ port of the SG_MIN driver: minimizes the model objective over the
 * Stiefel/Grassmann manifold starting from Y0.
 * @param Y0      starting point; orthonormalized via a LAPACK QR factorization.
 * @param rc      0 = complex, 1 = real arithmetic.
 * @param mode    solver: 0 dog-leg, 1 PRCG, 2 FRCG, 3 Newton.
 * @param metric  0 flat, 1 euclidean, 2 canonical (canonical currently maps to
 *                euclidean, matching the commented matlab port below).
 * @param verbose per-iteration progress printing.
 * @param ftol    objective tolerance (currently ignored: default forced below).
 * @param gradtol gradient tolerance (currently ignored: default forced below).
 * @param motion  0 approximate, 1 exact geodesic motion.
 * @param FP      statistics consumed by the objective F.
 * @return objective value and minimizer. Only the dog-leg and PRCG paths are
 *         wired to the return value; FRCG/Newton run but return a
 *         value-initialized result (TODO below).
 */
template<typename T>
rsgmin<T> sg_min (matrix<T> &Y0, int rc, int mode, int metric, bool verbose, 
									double ftol, double gradtol, int motion, const FParameters<T> &FP){
	//Orthonormalize the starting point through a LAPACK QR factorization.
	matrix<T,column_major> cY0 = Y0; T *pY0 = &cY0(0,0);
	vector<T> TAU (min(Y0.size1(),Y0.size2())); T *pTAU = &TAU(0);
	identity_matrix<T> I(Y0.size1(),Y0.size1());  //these sizes are the correct ones
	matrix<T> Q(I), R(Y0.size1(),Y0.size2());
	if( Y0.size2() <= Y0.size1()){

		long M = cY0.size1(), N = cY0.size2(), LDA = M;
		long LWORK = N*N; //eyeballed; LAPACK only requires LWORK >= N
		vector<T> WORK (LWORK); T *pWORK = &WORK(0);
		long info;

		dgeqrf_(&M,&N,pY0,&LDA,pTAU,pWORK,&LWORK,&info);

		if(info != 0){
			//Bug fix: info is long, so %ld (was %d); the routine actually
			//called is dgeqrf_, not sgeqrf_.
			printf("Error %ld solving dgeqrf_.",info);
			//exit(1);
		}

		getQR(Y0,cY0,TAU,Q,R);
	}
	else{
		//TODO m>n case: only the first n columns of Q and the first n rows of R are computed
	}
	SGP.verbose = 1;
	//verbarg initialized to 0 (it was left uninitialized in the original).
	int metarg = 0, rcarg = 0, ftolarg = 0, gradtolarg = 0, mdarg = 0, motarg = 0, verbarg = 0, partarg = 1; //partition forced on

	switch(motion){
	case 0 : { //Approximate
		SGP.motion = 0; motarg = 1;
		break;
	}
	case 1 : { //Exact
		SGP.motion = 1; motarg = 1;
		break;
	}
	default : {
		printf("Error selection, complex or real?.\n");
		break;
	}
	}

	switch(metric){
	case 0 : { //Flat
		SGP.metric = 0; metarg = 1;
		break;
	}
	case 1 : { //Euclidean
		SGP.metric = 1; metarg = 1;
		break;
	}
	case 2 : { //Canonical — NOTE(review): maps to 1 (euclidean) here and in the
		//commented matlab port; confirm this is intentional.
		SGP.metric = 1; metarg = 1;
		break;
	}
	default : {
		printf("Error selection, flat-euclidean-canonical metric?.\n");
		break;
	}
	}

	switch(rc){
	case 0 : { //Complex
		SGP.complex = 0; rcarg = 1;
		break;
	}
	case 1 : { //Real
		SGP.complex = 1; rcarg = 1;
		break;
	}
	default : {
		printf("Error selection, complex or real?.\n");
		break;
	}
	}

	switch(verbose){
	case 0 : { //quiet
		SGP.verbose = 0; verbarg = 1;
		break;
	}
	case 1 : { //per-iteration progress
		SGP.verbose = 1; verbarg = 1;
		break;
	}
	default : {
		printf("Error selection, verbarg?.\n");
		break;
	}
	}

	switch(mode){
	case 0 : { //Dog
		SGP.Mode = 0; mdarg = 1;
		break;
	}
	case 1 : { //Prcg
		SGP.Mode = 1; mdarg = 1;
		break;
	}
	case 2 : { //Frcg
		SGP.Mode = 2; mdarg = 1;
		break;
	}
	case 3 : { //Newton
		SGP.Mode = 3; mdarg = 1;
		break;
	}
	default : {
		printf("Error selection, mdarg?.\n");
		break;
	}
	}		

	SGP.maxiter = 1000;
	//SGP.gradtol = gradtol; gradtolarg = 1;
	//SGP.ftol = ftol; ftolarg = 1;

	if(rcarg){
		bool flag = isreal(Y0);
		if(!flag && (1-SGP.complex)){ 
#pragma warning(Y0 has imaginary part, but real computation has been declared.  Good luck.)
		}
		else SGP.complex = !flag;
	}

	if(!metarg) SGP.metric = 1;

	if(!motarg) SGP.motion = 0;

	if(!gradtolarg) SGP.gradtol = 1e-7;

	if(!ftolarg) SGP.ftol = 1e-10;

	if(!mdarg) SGP.Mode = 0;

	if(partarg){
		//Hard-coded two-element partition (placeholder until a real
		//partition<T>(Y0) is available).
		vector<int> apart (2); //u = 2
		for(int i=0;i<2;i++) apart(i) = i+1;
		SGP.partition.push_back(apart);
	}
	//else SGP.partition = partition<T>(Y0);

	SGP.dimension = sgmindimension<T>(Y0);

	switch(SGP.Mode){
	case 0 : { //Dog
		solvers::dog::sgDogr<T> sgdr = solvers::dog::sg_DOG<T>(Y0,FP);
		//Bug fix: the result used to be discarded and control fell off the
		//end of a value-returning function (undefined behavior).
		rsgmin<T> rsg = {sgdr.fn,sgdr.Yn};
		return rsg;
	}
	case 1 : { //Prcg
		solvers::prcg::sgprcgr<T> rp = solvers::prcg::sg_prcg<T>(Y0,FP); //Yn,fn
		rsgmin<T> rsg = {rp.fn,rp.Yn}; //Yn,fn
		return rsg;
	}
	case 2 : { //Frcg
		solvers::frcg::rfrcg<T> rf = solvers::frcg::sg_frcg<T>(Y0,FP);
		//TODO wire the FRCG result into rsgmin once its field layout is fixed
		break;
	}
	case 3 : { //Newton
		solvers::newton::rnewton<T> rn = solvers::newton::sg_newton<T>(Y0,FP);
		//TODO wire the Newton result into rsgmin once its field layout is fixed
		break;
	}
	}
	//Bug fix: never fall off the end of a value-returning function.
	return rsgmin<T>();
}
/*
	template<typename T>
	rsgmin<T> sg_min(const matrix<T> &Y0, int rc, int mode, int metric, bool verbose, 
	double ftol, double gradtol, int motion, const std::vector<vector<int> > &partition) {
	matrix<T> cY0 = Y0;
	vector<T> TAU (min(Y0.size1(),Y0.size2()));
	identity_matrix<float> I(Y0.size1(),min(Y0.size1(),Y0.size2()));
	matrix<float> Q(I), R(min(Y0.size1(),Y0.size2()),Y0.size2());
	if( Y0.size2() <= Y0.size1()){
	lapack::geqrf(Y0,TAU);
	getQR(cY0,Y0,TAU,Q,R);
	}
	else{
	//Falta el m>n, solo las n primeras col de Q y las n prim filas de R son computadas
	}
	SGP.verbose = 1;
	int metarg = 0, rcarg = 0, partarg = 0, ftolarg = 0, gradtolarg = 0, mdarg = 0, motarg = 0, verbarg;
	
	switch(motion){
	case 0 : { //Approximate
	SGP.motion = 0; motarg = 1;
	break;
	}
	case 1 : { //Exact
	SGP.motion = 1; motarg = 1;
	break;
	}
	default : {
	printf("Error selection, complex or real?.\n");
	break;
	}
	}

	switch(metric){
	case 0 : { //Flat
	SGP.metric = 0; metarg = 1;
	break;
	}
	case 1 : { //Euclidean
	SGP.metric = 1; metarg = 1;
	break;
	}
	case 2 : { //Canonical
	SGP.metric = 1; metarg = 1;
	break;
	}
	default : {
	printf("Error selection, flat-euclidean-canonical metric?.\n");
	break;
	}
	}

	switch(rc){
	case 0 : { //Complex
	SGP.complex = 0; rcarg = 1;
	break;
	}
	case 1 : { //Real
	SGP.complex = 1; rcarg = 1;
	break;
	}
	default : {
	printf("Error selection, complex or real?.\n");
	break;
	}
	}

	switch(verbose){
	case 0 : { //
	SGP.verbose = 0; verbarg = 1;
	break;
	}
	case 1 : { //
	SGP.verbose = 1; verbarg = 1;
	break;
	}
	default : {
	printf("Error selection, verbarg?.\n");
	break;
	}
	}

	switch(mode){
	case 0 : { //Dog
	SGP.Mode = 0; mdarg = 1;
	break;
	}
	case 1 : { //Prcg
	SGP.Mode = 1; mdarg = 1;
	break;
	}
	case 2 : { //Frcg
	SGP.Mode = 2; mdarg = 1;
	break;
	}
	case 3 : { //Newton
	SGP.Mode = 3; mdarg = 1;
	break;
	}
	default : {
	printf("Error selection, mdarg?.\n");
	break;
	}
	}		

	std::vector<vector<int> > part = partition; partarg = 1;
	SGP.gradtol = gradtol; gradtol = 1;
	SGP.ftol = ftol; ftolarg = 1;

	if(rcarg){
	bool flag = isreal(Y0);
	if(!flag && (1-SGP.complex)){ 
	#pragma warning(Y0 has imaginary part, but real computation has been declared.  Good luck.)
	}
	else SGP.complex = !flag;
	}

	if(!metarg) SGP.metric = 1;

	if(!motarg) SGP.motion = 0;

	if(!gradtolarg) SGP.gradtol = 1e-7;

	if(!ftolarg) SGP.ftol = 1e-10;

	if(!mdarg) SGP.Mode = 0;

	SGP.partition = partarg ? part : partition(Y0);

	//SGP.dimension = dimension(Y0); //Nombre!!

	rsgmin<T> rsg;
	switch(mode){
	case 0 : { //Dog
	solvers::dog::sgDogr<T> sgdr;
	sgdr = solvers::dog::sg_DOG(Y0);
	//rsg = sgdr;
	break;
	}
	case 1 : { //Prcg
	solvers::prcg::sgprcgr<T> rp;
	rp = solvers::prcg::sg_prcg(Y0);
	break;
	}
	case 2 : { //Frcg
	solvers::frcg::rfrcg<T> rf;
	rf = solvers::frcg::sg_frcg(Y0);
	break;
	}
	case 3 : { //Newton
	solvers::newton::rnewton<T> rn;
	rn = solvers::newton::sg_newton(Y0);
	break;
	}
	}		
	return rsg;
	}
*/
//----------------------------------------------------------------------------------------------------

/** Returns a sorted copy of v (selection sort).
 * @param v   input vector (left untouched).
 * @param asc true -> ascending (default); false -> descending.
 * @return sorted copy of v.
 * Bug fix: the descending branch was an empty stub (`else{}`) that silently
 * returned the UNSORTED copy; it is now implemented.
 */
template<typename T>
vector<T> ublassort (const vector<T> &v, bool asc = true){
	vector<T> res = v;
	const int n = static_cast<int>(res.size());
	for(int i=0;i<n-1;i++){
		//Locate the best candidate (min for ascending, max for descending).
		int best = i;
		for(int j=i+1;j<n;j++){
			const bool better = asc ? (res(j) < res(best)) : (res(j) > res(best));
			if(better) best = j;
		}
		if(best != i){
			T aux = res(i);
			res(i) = res(best);
			res(best) = aux;
		}
	}
	return res;
}

/** Y = slices(y,h)
 * 
 * This function discretizes continuous response y into h slices.
 * USAGE:
 *  - outputs:
 *    Y: discretized, integer-valued response vector. Y takes values 1:h;
 *  - inputs:
 *    y: continuos response vector.
 *    h: number of slices to use for response discretization.
 */
/** Discretizes the continuous response y into h slices (1..h) by order
 * statistics: slice boundaries are the values A(j*corte) of the sorted
 * response, with corte = floor(n/h) observations per slice.
 * @param y continuous response vector.
 * @param h number of slices.
 * @return integer-valued response (stored in T) taking values 1..h.
 * Bug fix: the original's second loop indexed the OBSERVATIONS with the slice
 * counter (y(j), Y(j)), so only the first h-1 observations were ever assigned
 * to middle slices; every observation is now classified against every
 * boundary, matching the matlab slices.m reference.
 * NOTE(review): boundary indices A(corte), A(j*corte) keep the original code's
 * convention; the matlab 1-based A(k) would be A(k-1) here — confirm.
 */
template<typename T>
vector<T> slices (const vector<T> &y, int h){
	vector<T> A = ublassort<T>(y); //order statistics of y
	int corte = y.size()/h; //observations per slice (integer division floors)
	vector<T> Y = h*ones<T>(y.size()); //default: topmost slice h
	for(int i=0;i<(int)y.size();i++){
		//Bottom slice: y(i) <= A(corte).
		if(y(i) <= A(corte)) Y(i) = static_cast<T>(1);
		//Middle slices: y(i) in (A(j*corte), A((j+1)*corte)] -> slice j+1.
		for(int j=1;j<h-1;j++){
			if((y(i) > A(j*corte)) && (y(i) <= A((j+1)*corte))) Y(i) = static_cast<T>(j+1);
		}
	}
	return Y;
}

namespace models{
	namespace lad{
		/** LAD (likelihood acquired directions) estimation of a u-dimensional
		 * central subspace.
		 * @param Yaux    response vector (class labels or continuous).
		 * @param X       predictor matrix, one observation per row.
		 * @param u       target subspace dimension.
		 * @param morph   1 = discrete response ('disc'), else continuous ('cont').
		 * @param initval initial estimate of the basis; optimized in place by sg_min.
		 * @return rlad with the basis W and the objective value fp.
		 * Bug fix: nslices was read UNINITIALIZED below whenever morph != 1;
		 * it now starts at 0 so the documented default of 5 slices applies
		 * deterministically.
		 */
		template<typename T>
			rlad<T> lad (const vector<T> &Yaux, const matrix<T> &X, int u, int morph, matrix<T> &initval){ //interface to be reworked
			rlad<T> res; 
			int nslices = 0; //0 = "not supplied"; continuous path falls back to 5
			vector<T> Y;
			if(morph == 1){ //'disc': responses already are class labels
				Y = mapdata<T>(Yaux);
				//std::cout << Y << std::endl;
				nslices = max<T>(Y);
			}
			else{ //'cont': discretize the response
				if(nslices == 0){
#pragma warning('LDR:slices','for continuous responses, a number of slices should be given. Five slices will be used')					
					nslices = 5;
				}
				Y = slices(Yaux,nslices);
			}
			FParameters<T> FP = setdatapars(Y,X,nslices); //matches the matlab code
			int p = X.size2();

			//Objective at the identity: likelihood of the full feature space.
			matrix<T> Wn = eye<T>(p);
			T fp = models::lad::F4lad<T>(Wn,FP);
			res.fp = fp;

			if(u == p){
#pragma warning('LDR:nored','The subspace you are looking for has the same dimension as the original feature space')
				res.fp = fp;
			}
			else{ //optimize starting from the supplied initial estimate
				rsgmin<T> rsg = sg_min<T>(initval,1,1,1,false,1.e-6,1.e-6,0,FP); //W0,real,prcg,euclidean,verbose,ftol,gradtol,approximate,partition=NULL.

#if 0		
				//**************************************************
				//GRAM-SCHMIDT ORTHONORMALIZATION.
				double ALPHA = 1., BETA = 0.;
				matrix<T,column_major> A = rsg.Yn; T *pA = &A(0,0); long LDA = A.size1();
				long N = A.size1(), K = A.size2();
				matrix<T,column_major> C (N,N); T *pC = &C(0,0); long LDC = C.size1();

				dsyrk_("U","N",&N,&K,&ALPHA,pA,&LDA,&BETA,pC,&LDC);

				long info;
				dpotrf_("U",&N,pC,&LDC,&info);
				if(info != 0){ 
					printf("Error %d in potrf.\n",info);
				}

				T *pW = &rsg.Yn(0,0); long LDB = rsg.Yn.size1();
				dtrsm_("L","U","N","N",&N,&N,&ALPHA,pC,&LDC,pW,&LDB); //swapped so the dimensions match
				//**************************************************
#endif

#if 1
				//AD-HOC fix: swap the first two columns of the minimizer.
				T aux;
				for(int i=0;i<rsg.Yn.size1();i++){
					aux = rsg.Yn(i,0);
					rsg.Yn(i,0) = rsg.Yn(i,1);
					rsg.Yn(i,1) = aux;
				}
#endif
				res.W = rsg.Yn;
				res.fp = rsg.fn;
			}
			return res;
		}

		/** LAD objective: negative log-likelihood of the likelihood-acquired-
		 * directions model evaluated at the projection W.
		 * @param W  p x u semi-orthogonal basis of the candidate subspace.
		 * @param FP statistics: per-slice covariances FP.sigma, marginal
		 *           covariance FP.sigmag, per-slice counts FP.n.
		 * @return objective value (minimized over the Grassmannian).
		 */
		template<typename T>
			T F4lad (const matrix<T> &W, const FParameters<T> &FP){
			int n = sum(FP.n); //total sample size
			int p = FP.sigmag.size2();
			vector<T> h = FP.n;
			h = h/static_cast<T>(n); //slice proportions n_i/n
			vector<T> a = vzeros<T>(h.size()); //row vector in the matlab original
			matrix<T> sigma_i = zeros<T>(p,p);
			for(int i=0;i<h.size();i++){
				sigma_i = FP.sigma[i];
				//log|W' sigma_i W| (temporaries materialized: uBLAS cannot nest prod)
				a(i) = logdet<T>(prod(matrix<T>(trans(W)),matrix<T>(prod(sigma_i,W))));
			}
			//Bug fix: the last term used n/2 (INTEGER division), dropping 0.5
			//for odd n; the matlab reference uses the exact n/2.
			T f = n*p*(1+log(2*PI))/2+n*logdet<T>(FP.sigmag)/2-n/2.*(logdet<T>(prod(trans(W),matrix<T>(prod(FP.sigmag,W))))-inner_prod(h,a));
			return f;
		}
		/** Euclidean gradient of F4lad with respect to W.
		 * @param W  p x u basis of the candidate subspace.
		 * @param FP statistics (per-slice covariances, marginal covariance, counts).
		 * @return dF/dW, same shape as W.
		 */
		template<typename T>
			matrix<T> dF4lad (const matrix<T> &W, const FParameters<T> &FP){
			
			const int p = FP.sigma[0].size2(), n = sum(FP.n);
			//Per-slice contributions -n_i * sigma_i * W * (W' sigma_i W)^{-1},
			//accumulated on the fly (the matlab code stacks them and sums over
			//the third dimension; the element-wise sum is identical).
			matrix<T> within = zeros<T>(W.size1(),W.size2());
			matrix<T> sigma_i = zeros<T>(p,p);
			for(int i=0;i<FP.n.size();i++){
				sigma_i = FP.sigma[i];
				matrix<T> WtSW = prod(trans(W),matrix<T>(prod(sigma_i,W)));
				matrix<T> WtSWinv = inv<T>(WtSW);
				matrix<T> contrib = -FP.n(i)*prod(sigma_i,matrix<T>(prod(W,WtSWinv)));
				within += contrib;
			}
			//Marginal contribution n * sigmag * W * (W' sigmag W)^{-1}.
			matrix<T> SgW = prod(FP.sigmag,W);
			matrix<T> WtSgW = prod(trans(W),SgW);
			matrix<T> WtSgWinv = inv<T>(WtSgW);
			matrix<T> marginal = n*prod(FP.sigmag,matrix<T>(prod(W,WtSgWinv)));
			//Equivalent to the original's -(first+second) after its sign flips:
			//df = sum_i(-n_i ...) + n*(...).
			matrix<T> df = within + marginal;
			return df;
		}
	};
	namespace pfc{
		template<typename T>
			T F4pfc (const vector<T> &vals, const FParameters<T> &FP){
			matrix<T> A = FP.sigmag;
			vector<T> nj = FP.n;
			int n = sum(nj), p = A.size2(), u = vals.size();
			T f = n*p/2.*(1.+log(2.*PI))+n/2.*logdet(A);
			if(u>0.) f = f-n/2.*sum(log(vals));
			return f;
		}
	};
	namespace spfc{
		template<typename T>
			T F4spfc (const matrix<T> &D, const FParameters<T> &FP, int u){
			matrix<T> A = FP.sigmag, Afit = FP.Afit;
			int n = sum(FP.n), p = A.size2(), r = FP.r;
			matrix<T> B = FP.B;
			matrix<T> invsqrt_D = invsqrt(D);
			matrix<T> auxr = prod(invsqrt_D,prod(D,invsqrt_D));
			matrix<T> auxf = prod(invsqrt_D,prod(Afit,invsqrt_D));
			T f;
			if(u == r){
				rfirsteigs<T> rfe = firsteigs(auxr,p);
				f = n*p/2.*log(2*PI) + n/2.*(logdet(D)+sum(rfe.diagkD)); //sum(aur)
			}
			else{ 
				if(u == 0){
					rfirsteigs<T> rfe = firsteigs(prod(inv(D),A),p);
					f = n*p/2.*log(2*PI) + n/2.*(logdet(D)+sum(rfe.diagkD)); //sum(vals)
				}
				else{
					rfirsteigs<T> rfef = firsteigs(auxf,p);
					rfirsteigs<T> rfer = firsteigs(auxr,p);
					f = n*p/2.*log(2*PI) + n/2.*(logdet(D)+sum(rfer.diagkD)+subrange(rfef.diagkD,u+1,std::min(r,p)));
				}
			}
		}
	};
	namespace ipfc{
		template<typename T>
			T F4ipfc (const vector<T> &valsAfit, const FParameters<T> &FP){
			matrix<T> A = FP.sigmag;
			int n = sum(FP.n);
			int p = A.size2();
			int u = valsAfit.size();
			rfirsteigs<T> rfe = firsteigs(A,p);
			T f = (u==0) ? n*p/2.*(1.+log(sum(rfe.diagkD)/p)) : n*p/2.*(1.+log((sum(rfe.diagkD)-sum(valsAfit))/p));
			return f;
		}
	};
	namespace hlda{
		/** W: projection matrix onto the reduced subspace
		 * FParameters: structure with needed statistics
		 *
		 * NOTE(review): this namespace looks unported/never instantiated —
		 * confirm before first use:
		 *  - FP.sigma is indexed sigmas(i) here but FP.sigma[i] in models::lad;
		 *    verify the actual container type of FParameters::sigma.
		 *  - `a` is a matrix but is indexed with a single subscript a(i).
		 *  - outer_prod(h,a) (a matrix) is added to a scalar logdet term; the
		 *    analogous F4lad expression uses inner_prod(h,a).
		 *  - zeros<T>(p) takes one argument here but zeros<T>(p,p) elsewhere.
		 *  - nested prod(...) calls lack the matrix<T> temporaries that uBLAS
		 *    requires (see F4lad).
		 */
		template<typename T>
			T F4hlda (const matrix<T> &W, const FParameters<T> &FP){
			vector< matrix<T> > sigmas = FP.sigma; //per-class covariances
			matrix<T> SIGMA = FP.sigmag; //marginal covariance
			vector<T> nj = FP.n; //per-class sample sizes
			int n = sum(nj), p = SIGMA.size2(); //cols
			vector<T> h = nj/n; //class proportions
			matrix<T> a (h.size(),1);
			matrix<T> sigma_i = zeros<T>(p);
			for(int i=0;i<h.size();i++){
				sigma_i = sigmas(i);
				//log|W' sigma_i W| per class
				a(i) = logdet(prod(trans(W),prod(sigma_i,W)));
			}
			T f = n/2.*(logdet(prod(trans(W),prod(inv(SIGMA),W)))+outer_prod(h,a));
			return f;
		}
		/** Gradient of F4hlda with respect to W.
		 * NOTE(review): same unported-code caveats as F4hlda above.
		 */
		template<typename T>
			matrix<T> dF4hlda (const matrix<T> &W, const FParameters<T> &FP){
			vector< matrix<T> > sigmas = FP.sigma; //per-class covariances
			matrix<T> SIGMA = FP.sigmag; //marginal covariance
			vector<T> nj = FP.n; //per-class sample sizes
			int n = sum(nj), p = SIGMA.size2(); //cols
			vector<T> h = nj/n; //class proportions
			matrix<T> sigma_i = zeros<T>(p), df = zeros<T>(W.size1(),W.size2());
			for(int i=0;i<h.size();i++){
				sigma_i = sigmas(i);
				//weighted per-class term sigma_i * W * (W' sigma_i W)^{-1}
				df = df + h(i)*prod(sigma_i,prod(W,inv(prod(trans(W),prod(sigma_i,W))))); 
			}
			df = n*(df+prod(inv(SIGMA),prod(W,inv(prod(trans(W),prod(inv(SIGMA),W))))));
			return df;
		}
	};
	namespace epfc{
		/** EPFC (extended principal fitted components) objective at W.
		 * @param W  p x u basis of the candidate subspace.
		 * @param FP statistics: marginal covariance FP.sigmag, fitted part
		 *           FP.Afit, counts FP.n.
		 * @return objective value.
		 * Fix: uBLAS cannot nest prod() calls directly (the lazy expression
		 * types do not match); intermediate matrix<T> temporaries are
		 * materialized, following the convention used in models::lad.
		 */
		template<typename T>
			T F4epfc (const matrix<T> &W, const FParameters<T> &FP){
			matrix<T> A = FP.sigmag;
			int n = sum(FP.n), p = A.size2();
			matrix<T> Afit = FP.Afit;
			matrix<T> sigres = A-Afit; //residual covariance
			//log|W' sigres W| and log|W' A^{-1} W|
			T a = logdet(prod(trans(W),matrix<T>(prod(sigres,W))));
			T b = logdet(prod(trans(W),matrix<T>(prod(inv(A),W))));
			T f = n*p/2.*(1.+log(2*PI)) + n/2.*(a+b);
			return f;
		}
		/** Euclidean gradient of F4epfc with respect to W.
		 * Same nested-prod fix as F4epfc; explicit temporaries keep each
		 * product two-operand, which is what uBLAS supports.
		 */
		template<typename T>
			matrix<T> dF4epfc (const matrix<T> &W, const FParameters<T> &FP){
			matrix<T> A = FP.sigmag;
			int n = sum(FP.n);
			matrix<T> Afit = FP.Afit;
			matrix<T> sigres = A-Afit; //residual covariance
			//sigres * W * (W' sigres W)^{-1}
			matrix<T> resW = prod(sigres,W);
			matrix<T> WtRW = prod(trans(W),resW);
			matrix<T> WtRWinv = inv(WtRW);
			matrix<T> a = prod(resW,WtRWinv);
			//A^{-1} * W * (W' A^{-1} W)^{-1}
			matrix<T> Ainv = inv(A);
			matrix<T> AinvW = prod(Ainv,W);
			matrix<T> WtAiW = prod(trans(W),AinvW);
			matrix<T> WtAiWinv = inv(WtAiW);
			matrix<T> b = prod(AinvW,WtAiWinv);
			matrix<T> df = n*(a+b);
			return df;
		}
	};
	namespace core{
		/** CORE (covariance reduction) objective at the projection W.
		 *
		 * NOTE(review): this namespace looks unported/never instantiated —
		 * confirm before first use:
		 *  - `a` is a matrix but is indexed with a single subscript a(i);
		 *  - sigma is indexed sigma(i) here but FP.sigma[i] in models::lad;
		 *  - outer_prod(h,a) (a matrix) is subtracted inside a scalar
		 *    expression (F4lad uses inner_prod in the analogous place);
		 *  - zeros<T>(p) takes one argument here but zeros<T>(p,p) elsewhere;
		 *  - nested prod(...) calls lack the matrix<T> temporaries uBLAS needs.
		 */
		template<typename T>
			T F4core (const matrix<T> &W, const FParameters<T> &FP){
			vector< matrix<T> > sigma = FP.sigma; //per-group covariances
			matrix<T> sigmag = FP.sigmag; //marginal covariance
			vector<T> n = FP.n; //per-group sample sizes
			int sum_n = sum(n), p = sigmag.size2(); //cols
			vector<T> h = n/sum_n; //group proportions
			matrix<T> a (h.size(),1);
			matrix<T> sigma_i = zeros<T>(p);
			for(int i=0;i<h.size();i++){
				sigma_i = sigma(i);
				//log|W' sigma_i W| per group
				a(i) = logdet(prod(trans(W),prod(sigma_i,W)));
			}
			T f = -sum_n/2.*(logdet(prod(trans(W),prod(sigmag,W)))-outer_prod(h,a));
			return f;
		}
		/** Gradient of F4core with respect to W.
		 * NOTE(review): in addition to the caveats above, `a` here is a
		 * std::vector indexed with a(i) (std::vector has no operator()), and
		 * prod(n(i), ...) passes a scalar as the first prod argument — this
		 * template cannot instantiate as written.
		 */
		template<typename T>
			matrix<T> dF4core (const matrix<T> &W, const FParameters<T> &FP){
			vector< matrix<T> > sigma = FP.sigma; //per-group covariances
			matrix<T> sigmag = FP.sigmag; //marginal covariance
			vector<T> n = FP.n; //per-group sample sizes
			int p = sigmag.size2();
			std::vector< matrix<T> > a (n.size()); //one term per group, shaped like W
			for(int i=0;i<n.size();i++) a(i) = zeros<T>(W.size1(),W.size2());
			matrix<T> sigma_i = zeros<T>(p);
			for(int i=0;i<n.size();i++){
				sigma_i = sigma(i);
				a(i) = -prod(n(i),prod(sigma_i,prod(W,inv(prod(trans(W),prod(sigma_i,W))))));
			}
			matrix<T> first = zeros<T>(W.size1(),W.size2());
			for(int i=0;i<n.size();i++){
				first = first + a(i); //matlab's sum(a,3): sum along the third dimension
			}
			matrix<T> second = sum(n)*prod(sigmag,prod(W,inv(prod(trans(W),prod(sigmag,W)))));
			matrix<T> df = -(first+second);
			return df;
		}
	};
};

#include <string>

/**
 * Some compiler flags:
 *   @param: -f Function used for the minimization, core, epfc, ipfc, spfc, pfc, lad. 
 *              Example of use: -fcore
 *   @param: -s Solver, frcg,newton,invdgradcg,invdgradMINRES,prcg,DOG.
 *              Example of use: -snewton
 *
 */

/** Validates command-line flags.
 * Recognized flags: -f<function> (core, epfc, ipfc, spfc, pfc, lad) and
 * -s<solver> (frcg, newton, invdgradcg, invdgradMINRES, prcg, DOG).
 * Prints a diagnostic for malformed or unrecognized values; flags other than
 * -f/-s are silently ignored, as in the original implementation.
 * @param argc argument count from main().
 * @param argv argument vector from main(); argv[0] is skipped.
 */
void checkargs(int argc, char *argv[]){
	//Valid argument values, replacing the original six-deep if/else chains.
	static const char *const functions[] = {"core","epfc","ipfc","spfc","pfc","lad"};
	static const char *const fsolvers[]  = {"frcg","newton","invdgradcg","invdgradMINRES","prcg","DOG"};
	const int nfunctions = sizeof(functions)/sizeof(functions[0]);
	const int nsolvers = sizeof(fsolvers)/sizeof(fsolvers[0]);
	for(int i=1;i<argc;i++){
		std::string s = argv[i];
		if(s.size() <= 2){
			printf("Invalid argument %s.\n",argv[i]);
			continue;
		}
		std::string cf = s.substr(0,2); //flag prefix, e.g. "-f"
		std::string aux = s.substr(2);  //flag value, e.g. "core"
		if(cf == "-f"){
			bool known = false;
			for(int k=0;k<nfunctions && !known;k++) known = (aux == functions[k]);
			if(!known) printf("No solver is identified with %s.\n",aux.c_str());
		}
		else if(cf == "-s"){
			bool known = false;
			for(int k=0;k<nsolvers && !known;k++) known = (aux == fsolvers[k]);
			if(!known) printf("No solver is identified with %s.\n",aux.c_str());
		}
	}
}

/** Row-wise mean of a matrix.
 * @param v input matrix.
 * @return vector res with res(i) = average of row i of v.
 */
template<typename T>
vector<T> mean (const matrix<T> &v){
	vector<T> res (v.size1()); 
	for(int r=0;r<(int)v.size1();r++){
		//Accumulate the row, then divide once by the number of columns.
		T acc = static_cast<T>(0);
		for(int c=0;c<(int)v.size2();c++) acc += v(r,c);
		acc /= v.size2();
		res(r) = acc;
	}
	return res;
}

/** Arithmetic mean of the entries of a vector.
 * @param v input vector.
 * @return sum of entries divided by their count.
 */
template<typename T>
T mean (const vector<T> &v){
	T total = static_cast<T>(0);
	for(int k=0;k<v.size();k++) total += v(k);
	total /= v.size();
	return total;
}

//Result of pc(): projected predictors and the projection basis.
template<typename T>
struct pcres{
	matrix<T> WX,W; //WX = X*W (projected data); W = generating vectors of the subspace
};

/**[WX,W] = pc(X,u,var)
 * This function performs dimensionality reduction by principal components.
 * USAGE:
 *   - outputs:
 *    WX: projection of the predictors onto the central subspace.
 *    W: generating vectors for the central subspace.
 *  - inputs:
 *    X: predictors matrix.
 *    u: desired dimension for the central subspace.
 *    var: argument used to choose between covariance-matrix principal 
 *         components and correlation-matrix principal components. Allowed 
 *         values are:
 *          'cov': for covariance principal components,
 *          'cor': for correlation principal components.
 */
template<typename T>
pcres<T> pc (const matrix<T> &X, int u, int var){
	rfirsteigs<T> rfe;
	matrix<T> W;
	if(var == 0){ //'cov': covariance principal components
		matrix<T> sigma = get_cov(X);
		//std::cout << sigma << std::endl;		
		//Leading u eigenvectors of the covariance span the subspace.
		rfe = firsteigs(sigma,u,u); 
		W = rfe.Vk;
	}
	else{ //'cor': correlation principal components
		//NOTE(review): this branch looks incomplete. diag(X) below takes the
		//diagonal of the DATA matrix, not of sigma — presumably diag(sigma)
		//was intended. Also, the centering loop is commented out, so Xcent
		//stays all zeros and Z is a zero matrix — confirm before use.
		matrix<T> sigma = get_cov<T>(X);
		matrix<T> diagsigma = vdiagtomat(diag(X));
		matrix<T> invsqrtsigma = invsqrtm(diagsigma,diagsigma,1);
		matrix<T> Xcent = zeros<T>(X.size1(),X.size2());
		/*
			for(int i=1;i<X.size2();i++) 
			subrange(Xcent,0,Xcent.size1(),i,i+1) = subrange(X,0,X.size1(),i,i+1)-mean<T>(subrange(X,0,X.size1(),i,i+1)); //<----->
		*/
		matrix<T> Z = prod(Xcent,invsqrtsigma);
		sigma = get_cov<T>(Z);
		rfe = firsteigs(sigma,u,u); //only W is needed
		//Back-transform the eigenvectors to the original scale.
		W = prod(invsqrtsigma,rfe.Vk);
	}
	matrix<T> rWX = prod(X,W);
	matrix<T> rW = W;
	pcres<T> pcr = {rWX,rW};
	return pcr;
}

/** OK = check_inputs(Y,X,method,morph,dim)
 *
 * Auxiliary function to check consistency of input arguments. It returns
 * true when all is right.
 *  - inputs:
 *     Y: Response vector. 
 *     X: Data matrix. Each row is an observation. 
 *     method: a string identifying the dimension reduction method to apply.
 *     morph: 'cont' for continuous responses (regression) or 'disc' for
 *     discrete responses (discrimination)
 *     dim: Dimension of the reduction subspace or string identifying the 
 *     method to infer it ('aic', 'bic', 'lrt', 'perm'). 
 */
/** Checks consistency of the (Y, X) input pair.
 * @param Y     response vector; must be numeric.
 * @param X     data matrix, one observation per row.
 * @param morph response type selector — currently unchecked (TODO).
 * @param dim   requested subspace dimension — currently unchecked (TODO).
 * @return true when all implemented checks pass.
 */
template<typename T>
bool check_inputs (const vector<T> &Y, const matrix<T> &X, int morph, int dim){
	if(!isreal(Y)){
		printf("Response vector must be numeric.\n");
		return false;
	}
	//Y must supply one response per observation (row) of X; compared as
	//unsigned sizes to avoid the original's signed/unsigned mismatch.
	if(Y.size() != X.size1()){
		printf("The number of rows in Y must equate the number of rows in X.\n");
		return false;
	}
	//TODO: remaining checks from the matlab version (method/morph/dim validation).
	return true;
}

//Result of ldr(): projected data, subspace basis, likelihood and dimension.
template<typename T>
struct resldr{
	matrix<T> WX,W; //WX = X*W; W spans the estimated central subspace
	T L; //likelihood at the optimal point
	int d; //dimension of the estimated central subspace
};

/** [WX,W,L,d] = ldr(Y,X,method,morph,dim,varargin)
 *
 * This function implements model-based sufficient dimensionality reduction 
 * for normal densities using maximum likelihood estimation. 
 *
 * USAGE:
 *  - outputs:
 *    WX: projection of the predictors onto de estimated central subspace.
 *    W:  generating vectors for the estimated central subspace.
 *    L:  likelihood at the optimal point.
 *    d: dimension of the estimated central subspace. (This is only useful when estimating
 *       the optimal dimension describing the data, though even in that case 
 *       it can be infered from W.)
 *  - inputs:
 *    Y: response vector.
 *    X: predictors matrix (each column is expected to be a different predictor).
 *    method: the method to be used for dimension reduction. So far, accepted methods are:
 *            '0 - PFC': principal fitted components (Cook and Forzani, 2008-a)
 *            '1 - IPFC': isotonic principal components (Cook 2007)
 *            '2 - EPFC': extended principal components (Cook 2007)
 *            '3 - SPFC': structured principal components (Cook and Forzani,2009-a)
 *            '4 - CORE': covariance reduction (Cook and Forzani, 2008)
 *            '5 - LAD': likelihood acquired directions (Cook and Forzani, 2009b)
 *
 *    morph: with value 'cont' = 0, it specifies that the response Y is continuous 
 *        (in which case it is a regression problem) while with value 'disc' = 1
 *        it specifies a discrete response (and a classification problem)
 *    dim: dimension of the central subspace you are looking for, or criterion 
 *         to find it. Available citeria are 
 *         'aic', for Akaike's information criterion; 
 *         'bic', for Bayes' information criterion; 
 *         'lrt', for likelihood-ratio test; and
 *         'perm', for permutation test.
 *    varargin: group of optional arguments. group of optional arguments. They can be set 
 *         in any order. They must be given as a 'ArgumentName', 'ArgumentValue' pair. 
 *         Available options depend on the selected model:
 *              - LAD: 
 *              'nslices': to set the number of slices for continuous response slicing.	
 *                       Default value is h=5.
 *              'alpha': to set the confidence level for likelihood-ratio tests and 
 *              permutation tests. Default value is alpha=0.05.				 
 *              'npermute': to set the number of permutation  samples for permutation
 *              tests. Default value is npermute=500.
 *              'initval': to set an initial estimate to start optimization.
 *              If no value is given, an initial estimate is guessed
 *              from several computation regarding eigendecomposition of
 *              conditional and marginal covariance matrices, along with estimates 
 *              such as SIR, SAVE and DR (===NOTE: all combinations of eigenvectors 
 *              of the marginal covariance matrix are searched for the best initial 
 *              estimates by default. However, when this number of combinations is very
 *              large (actually 5000 in current implementation), only the
 *              first dim eigenvectors are searched for the best initial value.
 *
 *              other: optional arguments for Stiefel-Grassmann optimization. See SG_MIN
 *              documentation for details. In addition to the original optional
 *              inputs in SG_MIN, you can set the maximum number of iterations to be
 *              used for estimation of the central subspace.
 *              - CORE: 
 *              'nslices', 'alpha', 'npermute', 'initval' and the optional inputs for the 
 *              SG_MIN package, with the same meaning as above. 
 *              - PFC:
 *              'alpha': to set the number of samples for permutation tests.
 *              Default value is alpha=0.05.
 *              'fy': to set a regression matrix to estimate the fitted
 *              covariance matrix. If no such matrix is given, a polynomial
 *              basis of order r is used, with r=max(dim+1,3) where dim is
 *              the dimension of the reduced subspace to look for.
 *              - IPFC: 
 *              'alpha' and 'fy', as above for PFC. 
 *              - SPFC: 
 *              'alpha' and 'fy', as above for PFC. 
 *              - EPFC: 
 *              'alpha' and 'fy', as above for PFC, and 'initval' as in LAD
 *              and CORE.
 *
 * EXAMPLES:
 *    - for regression, slicing the continous response into 10 slices, 
 *      and looking for a subspace of dimension 3:
 *      [WX,W,L,d] = ldr(Y,X,'LAD','cont',3,'nslices',10)
 *    - same as above, but estimating the dimension by using Likelihood-ratio 
 *      test with confidence level of 0.05:
 *      [WX,W,L,d] = ldr(Y,X,'LAD','cont','lrt','nslices',10,'alpha',0.05)
 *    - for classification,  looking for a subspace of dimension 2 and using W0 as initial value:
 *      [WX,W,L,d] = ldr(Y,X,'LAD','disc',2,'initval',W0)
 */
/** Model-based sufficient dimension reduction driver (see the block comment
 * above for the full matlab-style usage description).
 * @param Y       response vector.
 * @param X       predictor matrix, one observation per row.
 * @param method  reduction method; only 5 (LAD) is implemented so far.
 * @param morph   1 = discrete response, else continuous.
 * @param dim     dimension of the central subspace to estimate.
 * @param initval initial basis estimate passed through to the optimizer.
 * @return projected data WX, basis W, likelihood L and dimension d; a
 *         value-initialized result when the inputs fail validation or the
 *         method is unknown.
 */
template<typename T>
resldr<T> ldr (const vector<T> &Y, const matrix<T> &X, int method, int morph, int dim, matrix<T> &initval){
	if(check_inputs(Y,X,morph,dim)){
		switch(method){
		case 5 : { //LAD
			models::lad::rlad<T> ladr = models::lad::lad<T>(Y,X,dim,morph,initval); //[W,fn<matrix>;fp<scalar>]
			matrix<T> rWX = prod(X,ladr.W);
			matrix<T> rW = ladr.W;
			T rL = -ladr.fp; //likelihood = negated objective
			resldr<T> rldr = {rWX,rW,rL,dim}; //WX,W,L,d
			return rldr;
		}
		default : {
			printf("No method associated.\n"); //typo fix: was "asociated"
			break;
		}
		}
	}
	//Bug fix: failing the checks (or an unknown method) used to fall off the
	//end of a value-returning function (undefined behavior).
	return resldr<T>();
}

//Data loaded by readfile(): predictors X and response Y.
template<typename T>
struct readfileres{
	matrix<T> X; //one observation per row; readfile fills 13 predictor columns
	vector<T> Y; //response, one entry per row of X
};

/** Reads a whitespace-separated data file: the first number on each line is
 * the response Y, the following (up to 13) numbers fill the predictor row of X.
 * @param fname path of the file to read.
 * @return populated readfileres; an empty (default-constructed) result when
 *         the file cannot be opened.
 * Bug fixes vs the original:
 *  - the fopen-failure path fell off the end of a value-returning function
 *    (undefined behavior); an empty result is returned instead;
 *  - line counting used `while(!feof)` with a -1 start, which was off by one
 *    for files without a trailing newline; counting successful fgets() calls
 *    is exact either way;
 *  - the last token of a line was silently dropped unless the line ended in a
 *    space; strtod-based scanning parses every token;
 *  - fgets() results are now checked.
 */
template<typename T>
readfileres<T> readfile (const char *fname){
	readfileres<T> rf;
	FILE *f = fopen (fname,"r");
	if(f == NULL){
		printf("No %s file opened.\n",fname);
		return rf;
	}
	char line[200];
	//First pass: count the data lines.
	int nlines = 0;
	while(fgets(line,200,f) != NULL) nlines++;
	rewind(f); //back to the beginning for the parsing pass
	vector<T> Y (nlines); matrix<T> X (nlines,13);
	for(int i=0;i<nlines;i++){
		if(fgets(line,200,f) == NULL) break; //defensive: file shrank underneath us
		char *cursor = line;
		char *next = NULL;
		//First token on the line is the response.
		Y(i) = static_cast<T>(strtod(cursor,&next));
		cursor = next;
		//Remaining tokens are the predictors, at most 13 per row.
		for(int col=0;col<13;col++){
			double val = strtod(cursor,&next);
			if(next == cursor) break; //no more numbers on this line
			X(i,col) = static_cast<T>(val);
			cursor = next;
		}
	}
	fclose(f);
	rf.Y = Y;
	rf.X = X;
	return rf;
}


#endif
