#include "utilities.h"
#include "optimiz.h"
#include "lk.h"
#include "free.h"
#include "models.h"
#include "rates.h"
#include "bfgs.h"


#define EPS   3.0e-8 //originally 8
#define TOLX 1.0e-7
static double sqrarg;
#define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg)


///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
// Serialize every free model parameter into the flat vector x, in a
// fixed order: branch lengths (only when opt_bl), kappa, alpha, GTR
// relative rates, and (MBL builds) the first n_l-1 mixture weights.
// Copy_Array_To_Tree() must consume slots in exactly the same order.
void Copy_Tree_To_Array(t_tree *tree, phydbl *x)
{
	int idx = 0;   // next free slot in x
	int br;
#ifdef ENABLE_MBL
	int m;
#endif
	if (tree->mod->s_opt->opt_bl)
	{
		For(br, 2*tree->n_otu-3) // one slot (per mixture class) for each branch
		{
#ifdef ENABLE_MBL
			For(m, n_l)
			{
				x[idx] = tree->t_edges[br]->l[m];
				idx++;
			}
#endif
#ifndef ENABLE_MBL
			x[idx] = tree->t_edges[br]->l;
			idx++;
#endif
		}
	}
	if (tree->mod->s_opt->opt_kappa) // ts/tv ratio
	{
		x[idx] = tree->mod->kappa;
		idx++;
	}
	if (tree->mod->s_opt->opt_alpha) // gamma shape
	{
		x[idx] = tree->mod->alpha;
		idx++;
	}
	if (tree->mod->s_opt->opt_rr) // GTR relative rates
	{
		For(br, tree->mod->n_diff_rr)
		{
			x[idx] = tree->mod->rr_val[br];
			idx++;
		}
	}
#ifdef ENABLE_MBL
	// Mixture weights: only n_l-1 are free (the last is implied, since
	// the weights sum to 1).
	if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
	{
		For(m, n_l - 1)
		{
			x[idx] = tree->mod->bl_mixture_wts[m];
			idx++;
		}
	}
#endif
}

/////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////
// Inverse of Copy_Tree_To_Array(): read the flat parameter vector x
// back into the tree/model, consuming slots in the same fixed order
// (branch lengths, kappa, alpha, GTR rates, mixture weights).
void Copy_Array_To_Tree(phydbl *x, t_tree *tree)
{
	int pc = 0;
	int i;
#ifdef ENABLE_MBL
	int mixture = 0;
#endif
	// Bug fix: the serializer writes branch lengths only when opt_bl is
	// set; the old code read them unconditionally, which shifted every
	// subsequent parameter (kappa, alpha, ...) when opt_bl was off.
	if (tree->mod->s_opt->opt_bl)
	{
		For(i, 2*tree->n_otu-3)
		{
#ifdef ENABLE_MBL
			// Bug fix: the serializer writes n_l lengths per edge, but the
			// old code read only n_l-1 of them — and accumulated x[pc]
			// *after* pc++ (i.e. the wrong slot) into a weight-style sum.
			// Simply read back all n_l lengths, mirroring the serializer.
			For(mixture, n_l)
			{	tree->t_edges[i]->l[mixture] = x[pc];
				pc++;
			}
#endif
#ifndef ENABLE_MBL
			tree->t_edges[i]->l = x[pc];
			pc++;
#endif
		}
	}
	if(tree->mod->s_opt->opt_kappa)
	{
		tree->mod->kappa = x[pc];
		pc++;
	}
	if (tree->mod->s_opt->opt_alpha)
	{
		tree->mod->alpha = x[pc];
		pc++;
	}
	if (tree->mod->s_opt->opt_rr)
	{
		For(i, tree->mod->n_diff_rr)
		{
			tree->mod->rr_val[i] = x[pc];
			pc++;
		}
	}
#ifdef ENABLE_MBL
	// Only n_l-1 weights are stored; reconstruct the last one so the
	// weights sum to 1.
	if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
	{
		phydbl wtsum = 0.0;
		For(mixture,n_l-1)//10.12.2010
		{	tree->mod->bl_mixture_wts[mixture] = x[pc];
			pc++;
			wtsum += tree->mod->bl_mixture_wts[mixture];
		}
		tree->mod->bl_mixture_wts[n_l-1] = 1.0 - wtsum;
	}
#endif
}

/*
* Victor's note: this algorithm comes from Chapter 9.7 of "Numerical Recipes in C", and was
* implemented in PhyML prior to my work.
*/
#define ALF 1.0e-4
// Backtracking line search ("lnsrch", Numerical Recipes in C, ch. 9.7),
// adapted so the parameter vector lives in the tree itself instead of a
// caller-supplied xold[]: the current point is snapshotted with
// Copy_Tree_To_Array() and trial points are pushed back with
// Copy_Array_To_Tree().
//
// fold   : function value at the starting point
// g      : gradient at the starting point
// p      : search direction (scaled in place if longer than stpmax)
// x      : out — the accepted point (also used as trial-point scratch)
// f      : out — function value at the accepted point
// stpmax : cap on the step length
// check  : out — 1 if the search converged on delta-x (caller usually
//          ignores this)
// func   : objective to minimize
void Lnsrch_Generic(t_tree *tree, double fold, double *g, double *p, double *x,
		double *f, double stpmax, int *check, phydbl (*func)(t_tree *tree))
{
	int i;
	double a,alam,alam2,alamin,b,disc,f2,fold2,rhs1,rhs2,slope,sum,temp,test,tmplam;
	double *local_xold;

	int n = How_Many_Free_Params(tree);

	alam = alam2 = f2 = fold2 = tmplam = .0;

	local_xold = (double *)mCalloc(n,sizeof(double));

	// Snapshot the current parameter vector; replaces NR's
	// "For(i,n) local_xold[i] = xold[i];".
	Copy_Tree_To_Array(tree, local_xold);

	*check=0;
	// Scale the step if it is too long.
	for(sum=0.0,i=0;i<n;i++) sum += p[i]*p[i];
	sum=sqrt(sum);
	if(sum > stpmax)
		for(i=0;i<n;i++) p[i] *= stpmax/sum;
	for(slope=0.0,i=0;i<n;i++)
		slope += g[i]*p[i];
	// Largest relative step component, used for the alamin bound.
	test=0.0;
	for(i=0;i<n;i++)
	{
		temp=fabs(p[i])/MAX(fabs(local_xold[i]),1.0);
		if (temp > test) test=temp;
	}
	alamin=TOLX/test;
	alam=1.0; // always try the full Newton step first

	for (;;)
	{
		for(i=0;i<n;i++)
		{
			// Hold the designated parameter fixed under the "multimax"
			// algorithm (August 2011 publication experiments).  The old
			// code expressed this with the no-op "x[i] = x[i];".
			if (!((tree->io->fix_param == i) && (tree->io->opt_algorithm == 1)))
			{
				x[i]=fabs(local_xold[i]+alam*p[i]);
			}
		}

		// Push local_xold + alam*p into the tree and evaluate.
		Copy_Array_To_Tree(x, tree);
	    tree->mod->update_eigen = 1;
		*f=(*func)(tree);

		// Convergence on delta-x: restore the starting point.
		if (alam < alamin)
		{
			for (i=0;i<n;i++) x[i]=local_xold[i];
			Copy_Array_To_Tree(x, tree);

			*check=1;
			Free(local_xold);
			return;
		}
		// Sufficient function decrease: accept this step.
		else if (*f <= fold+ALF*alam*slope )
		{
			// Replaces NR's "For(i,n) xold[i] = local_xold[i];".
			Copy_Array_To_Tree(local_xold, tree);

			Free(local_xold);
			return;
		}
		else // backtrack
		{
			if (alam == 1.0)
				tmplam = -slope/(2.0*(*f-fold-slope)); // quadratic model, first backtrack
			else // cubic model on subsequent backtracks
			{
				rhs1 = *f-fold-alam*slope;
				rhs2=f2-fold2-alam2*slope;
				a=(rhs1/(alam*alam)-rhs2/(alam2*alam2))/(alam-alam2);
				b=(-alam2*rhs1/(alam*alam)+alam*rhs2/(alam2*alam2))/(alam-alam2);
				if (a == 0.0) tmplam = -slope/(2.0*b);
				else
				{
					disc=b*b-3.0*a*slope;
					if (disc<0.0) tmplam = 0.5*alam;
					else if(b <= 0.0) tmplam=(-b+sqrt(disc))/(3.0*a);
					// NOTE(review): Numerical Recipes uses
					// tmplam = -slope/(b+sqrt(disc)) here; this code was
					// deliberately changed to match the b <= 0 branch —
					// confirm against the NR text/errata before reverting.
					else tmplam = (-b+sqrt(disc))/(3.0*a);
				}
				if (tmplam>0.5*alam) tmplam=0.5*alam; // lambda <= 0.5 * lambda_old
			}
		}
		alam2=alam;
		f2 = *f;
		fold2=fold;
		alam=MAX(tmplam,0.1*alam); // lambda >= 0.1 * lambda_old
	}
	// The loop above always returns; the unreachable trace/Free that
	// used to follow it has been removed.
}

#undef ALF

//
// Calculates the gradient of the likelihood function.
// derivatives will contain the gradient once the function is finished.
// size_of_der is the number of free parameters
// Numerically approximate the gradient of the likelihood function with
// respect to every free parameter, in the same fixed ordering used by
// Copy_Tree_To_Array(): branch lengths, kappa, alpha, GTR rates, and
// (MBL builds) mixture weights.  Results are written into derivatives[].
// size_of_der is the number of free parameters (used only for the MPI
// reduction/printout below).
void Num_Derivative(t_tree *tree, phydbl stepsize,
		phydbl (*func)(t_tree *tree), phydbl *derivatives, int size_of_der)
{

#ifdef MPI
	// Rank 0 instructs the other ranks to enter their gradient routine.
	if (Global_myRank == 0)
	{	int mpi_message = Broadcast_mpi_msg(tree, GO_DFUNC);
	}
#endif

	int i, j;

#ifdef ENABLE_MBL
	int mixture;
#endif
	phydbl err,f0;
	// Baseline function value; each one-parameter derivative perturbs a
	// single parameter around this point.
	f0 = (*func)(tree);
	int pc = 0; // pc = parameter counter

	if (tree->mod->s_opt->opt_bl)
	{
	#ifdef MPI
		// Under branch-parallelism each rank differentiates only its own
		// slice of edges; derivatives are merged by Reduce_derivatives()
		// at the end.
		int starthere;
		int stophere;
		if (tree->io->parallelism_type == PARALLEL_OVER_BRANCHES)
		{
			starthere = Global_myStartEdge;
			stophere = Global_myStopEdge;
		}
		else
		{
			starthere = 0;
			stophere = 2*tree->n_otu-4;
		}
		for(i = starthere; i <= stophere; i++)
	#endif
	#ifndef MPI
		For(i, 2*tree->n_otu-3)
	#endif
		{
	#ifdef ENABLE_MBL
			// One derivative per mixture class per edge.
			For(j, n_l)
			{
				derivatives[pc] = Num_Derivatives_One_Param(func,
						tree,
						f0,
						&tree->t_edges[i]->l[j],
						stepsize,
						&err,
						0
				);
				pc++;
			}
	#endif
	#ifndef ENABLE_MBL
			// Here we use the analytical calucation of derivates for branch lengths
			//derivatives[pc] = Lk_Prime(tree, tree->t_edges[i]);
			//derivatives[pc] = Lk(tree);

			// For my August 2011 publication: a zero derivative keeps the
			// fixed parameter stationary during the line search.
			if ((tree->io->fix_param == pc) && (tree->io->opt_algorithm == 1))
			{
				derivatives[pc] = 0.0;
			}
			else
			{
				derivatives[pc] = Num_Derivatives_One_Param(func,
						tree,
						f0,
						&tree->t_edges[i]->l,
						stepsize,
						&err,
						0
				);
			}
			pc++;
	#endif
		}
	}

#ifdef MPI
	// Only rank 0 differentiates the global (non-branch) parameters.
	if (Global_myRank == 0)
	{
#endif

	if (tree->mod->s_opt->opt_kappa)
	{
		derivatives[pc] = Num_Derivatives_One_Param(func,
				tree,
				f0,
				&tree->mod->kappa,
				//param+pc,
				stepsize,
				&err,
				1);
		pc++;
	}
	if (tree->mod->s_opt->opt_alpha)
	{
		derivatives[pc] = Num_Derivatives_One_Param(func,
				tree,
				f0,
				&tree->mod->alpha,
				//param+pc,
				stepsize,
				&err,
				1);
		pc++;
	}

	if (tree->mod->s_opt->opt_rr)
	{
		For(i, tree->mod->n_diff_rr)
		{
			derivatives[pc] = Num_Derivatives_One_Param(func,
					tree,
					f0,
					&tree->mod->rr_val[i],
					//param+pc,
					stepsize,
					&err,
					1);
			pc++;
		}
	}

	// find the derivative of each mixture component weight
	// (only n_l-1 weights are free; the last one is implied).
#ifdef ENABLE_MBL
	if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
	{
		For(mixture, n_l-1)//10.12.2010
		{
			derivatives[pc] = Num_Derivatives_One_Param(func,
					tree,
					f0,
					&tree->mod->bl_mixture_wts[mixture],
					//param+pc,
					stepsize,
					&err,
					1);
			pc++;
		}
	}
#endif

#ifdef MPI
	} // end for rank == 0
	// Merge the per-rank partial gradients into every rank's copy.
	Reduce_derivatives(derivatives, size_of_der);

	for (i=0; i<size_of_der; i++)
	{
		PhyML_Printf("\n . util 5934, derivatives[%d] = %f", i, derivatives[i]);
	}
#endif
}


// Count how many parameters the optimizer is currently free to move:
// kappa, alpha, GTR relative rates, and branch lengths (in MBL builds,
// n_l lengths per edge plus the n_l-1 free mixture weights).  This is
// the length of the vectors produced by Copy_Tree_To_Array().
int How_Many_Free_Params(t_tree *tree)
{
	int count = 0; 	// VHS 10.10.2010

	if (tree->mod->s_opt->opt_kappa)       // ts/tv
		count += 1;
	if (tree->mod->s_opt->opt_alpha)       // gamma shape
		count += 1;
	if (tree->mod->s_opt->opt_rr)          // GTR
		count += tree->mod->n_diff_rr;
#ifdef ENABLE_MBL
	if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
	{
		// all branch lengths (n_l per edge) plus n_l-1 free weights
		count += ((2*tree->n_otu-3) * n_l) + n_l - 1;// - 1; //10.12.2010
	}
	else if (tree->mod->s_opt->opt_bl)
	{
		count += (2*tree->n_otu-3) * n_l;
	}
#endif
#ifndef ENABLE_MBL
	if (tree->mod->s_opt->opt_bl)
	{
		count += 2*tree->n_otu-3; // one length per edge
	}
#endif

	return count;
}

// Emit one trace line per tracked parameter (branch lengths, GTR rates,
// alpha) for optimization iteration "its".  No-op unless the
// print_bl_trace option is enabled.
void Print_Param_Trace(t_tree *tree, int its)
{
	int idx;

	if (tree->io->print_bl_trace != 1)
		return;

	if (tree->mod->s_opt->opt_bl == 1)
	{
		For(idx, 2*tree->n_otu-3)
		{
			// NOTE(review): in ENABLE_MBL builds ->l is an array, so
			// passing it to %f would be wrong — this printer appears to
			// assume a single-length build; confirm.
			PhyML_Printf("\n. optimiz.c 595 branch= %d iter= %d length= %f lnL= %f *", idx, its, tree->t_edges[idx]->l, tree->c_lnL);
		}
	}

	if (tree->mod->s_opt->opt_rr)
	{
		For(idx, tree->mod->n_diff_rr)
		{
			PhyML_Printf("\n. optimiz.c 595 rr[%d]= -%d iter= %d val= %f lnL= %f *", idx, idx+1, its, tree->mod->rr_val[idx], tree->c_lnL);
		}
	}

	if (tree->mod->s_opt->opt_alpha)
	{
		PhyML_Printf("\n. optimiz.c 595 alpha= -600 iter= %d val= %f lnL= %f *", its, tree->mod->alpha, tree->c_lnL);
	}
}


// In this method, "*tree" substitutes for "p" in the original numerical recipe.  Whereas in the
// recipe, we iterate through p values by considering p[0]...p[n-1], here we need to be more
// careful and treat each p value differently, depending if its a branch length,
// a mixture weight, ts/tv value, etc.
//
// The original recipe defines "n" as a function input.  Here, "n" is calculated dynamically
// by querying the tree object for which parameters should be optimized.
// BFGS quasi-Newton minimizer ("dfpmin", Numerical Recipes in C,
// ch. 10.7), adapted to PhyML: the parameter vector lives in the
// tree/model and is enumerated in the fixed order used by
// Copy_Tree_To_Array() (branch lengths, kappa, alpha, GTR rates,
// mixture weights).  "n" is therefore computed from the tree rather
// than passed in.
//
// gtol      : convergence tolerance on the scaled gradient
// step_size : step used by the numerical-gradient routine dfunc
// func      : objective to minimize (e.g. Return_Abs_Lk)
// dfunc     : gradient of func (e.g. Num_Derivative)
// lnsrch    : backtracking line search (e.g. Lnsrch_Generic)
// failed    : out — set to 1 when no optimum was found within
//             bfgs_itmax iterations
void BFGS_Full(t_tree *tree, double gtol, double step_size,
		double(*func)(), void (*dfunc)(), void (*lnsrch)(),int *failed)
{
	int check,i,its,j,pc;
	double den,fac,fad,fae,fp,stpmax,sum=0.0,sumdg,sumxi,temp,test,fret;
	double *dg,*g,*hdg,**hessin,*pnew,*xi;

	// Number of free parameters.
	int n = How_Many_Free_Params(tree);

	// Working storage: gradient, inverse Hessian, gradient difference,
	// trial point, H*dg, and line direction.
	g    = (double *)mCalloc(n,sizeof(double ));
	hessin = (double **)mCalloc(n,sizeof(double *));
	For(i,n) hessin[i] = (double *)mCalloc(n,sizeof(double));
	dg   = (double *)mCalloc(n,sizeof(double ));
	pnew = (double *)mCalloc(n,sizeof(double ));
	hdg  = (double *)mCalloc(n,sizeof(double ));
	xi   = (double *)mCalloc(n,sizeof(double ));

    tree->mod->update_eigen = 1;
	fp=(*func)(tree); // Calculate the starting function value
	(*dfunc)(tree,step_size,func,g,n); // Calculate the starting gradient

	for (i=0;i<n;i++) // Initialize the inverse Hessian to the unit matrix
	{
		// (An earlier revision used j<=n here, writing one past the row
		// and corrupting the allocator metadata; j<n is correct.)
		for (j=0;j<n;j++) hessin[i][j]=0.0;
		hessin[i][i]=1.0;
		xi[i] = -g[i]; // initial line direction: steepest descent
	}

	// Accumulate a magnitude estimate of the current point for stpmax.
	// Bug fix: removed the "pc++" statements that were here — pc was
	// still uninitialized at this point (undefined behavior) and its
	// value was never used before being reset below.
#ifdef ENABLE_MBL
	if (tree->mod->s_opt->opt_bl)
	{
		For(i, 2*tree->n_otu-3)
		{	For(j, n_l)
			{
				sum += tree->t_edges[i]->l[j] * tree->t_edges[i]->l[j];
			}
		}
	}
#endif
#ifndef ENABLE_MBL
	if (tree->mod->s_opt->opt_bl)
	{
		For(i, 2*tree->n_otu-3)
		{	sum += tree->t_edges[i]->l * tree->t_edges[i]->l;
		}
	}
#endif
	// NOTE(review): unlike the branch lengths above, kappa/alpha/rr are
	// added unsquared; Numerical Recipes sums squares of every component
	// — confirm this asymmetry is intentional.
	if (tree->mod->s_opt->opt_kappa)
	{	sum += tree->mod->kappa;
	}
	if(tree->mod->s_opt->opt_alpha)
	{	sum += tree->mod->alpha;
	}
	if (tree->mod->s_opt->opt_rr) // GTR
	{	int i;
		For(i, tree->mod->n_diff_rr)
		{	sum += tree->mod->rr_val[i];
		}
	}
#ifdef ENABLE_MBL
	if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
	{	For(j,n_l)
		{	sum += tree->mod->bl_mixture_wts[j] * tree->mod->bl_mixture_wts[j];
		}
	}
#endif

	stpmax=tree->io->lnsearch_stepmax*MAX(sqrt(sum),(double)n);

	// Remember the best solution seen so far (BFGS line searches can
	// temporarily make the likelihood worse).
#ifndef ENABLE_MBL
	phydbl *best_bl = (phydbl *)mCalloc(2*tree->n_otu-3,sizeof(phydbl));
#else
	phydbl *best_bl = (phydbl *)mCalloc((2*tree->n_otu-3)*n_l,sizeof(phydbl));
#endif
    Record_Br_Len(best_bl, tree);
    model *best_model = Copy_Model(tree->mod);
	phydbl best_lnl = tree->c_lnL;

	// NOTE(review): "<=" runs bfgs_itmax+1 iterations — confirm whether
	// the bound is meant to be inclusive.
	for(its=0;its<=tree->io->bfgs_itmax;its++) // Main loop over iterations
	{
		  if (tree->io->print_bl_trace == 1)
		  {	Print_Param_Trace(tree,its);
		  }

		// Is our current value better than our best value? If so, save it.
		if (tree->c_lnL > best_lnl)
		{
			if(tree->io->print_trace)
			{
			  if (tree->io->print_bl_trace == 1)
			  {	Print_Param_Trace(tree,its);
			  }

#ifdef ENABLE_MBL
				char *s = Write_Tree(tree,-1);
#endif
#ifndef ENABLE_MBL
				char *s = Write_Tree(tree);
#endif
				PhyML_Fprintf(tree->io->fp_out_trace,"[%f]%s\n",tree->c_lnL,s); fflush(tree->io->fp_out_trace);
				Free(s);
			}

			// Save the best so-far solution
			best_lnl = tree->c_lnL;
			Record_Model(tree->mod, best_model);
			Record_Br_Len(best_bl, tree);

			Print_Lk(tree,"[BFGS]");
			if (tree->mod->s_opt->opt_bl)
			{	PhyML_Printf("\n.\t\t\t[Branch lengths     ][ %f ]", Get_Tree_Size(tree) );
			}
#ifdef ENABLE_MBL
			if(tree->mod->s_opt->opt_bl_mixtures)
			{
				PhyML_Printf("\n.\t\t\t[BLs & mixtures     ]");
				PhyML_Printf("[ ");
				int i;
				For(i, n_l)
				{
					PhyML_Printf("%.3f ", tree->mod->bl_mixture_wts[i]);
				}
				PhyML_Printf("]");
			}
#endif
			if (tree->mod->s_opt->opt_kappa)
			{	PhyML_Printf("\n.\t\t\t[ts/tv              ][ %f ]", tree->mod->kappa );
			}
			if(tree->mod->s_opt->opt_alpha)
			{	PhyML_Printf("\n.\t\t\t[alpha              ][ %f ]", tree->mod->alpha );
			}
			if (tree->mod->s_opt->opt_rr)
			{	int i;
				PhyML_Printf("\n.\t\t\t[GTR parameters     ]");
				PhyML_Printf("[ ");
				For(i, tree->mod->n_diff_rr){
					PhyML_Printf("%.3f ", tree->mod->rr_val[i]);
				}
				PhyML_Printf(" ]");
			}
		} // end if(tree->c_lnL > best_lnl)

		// If the last step made things worse, fall back to the best
		// solution and shrink the allowed step.  VHS: added 9/2011.
		if (tree->c_lnL < best_lnl)
		{
			// Move the best solution into the current tree
			Restore_Br_Len(best_bl, tree);
			Record_Model(best_model,tree->mod);
		    tree->mod->update_eigen = 1;
			Lk(tree);

			// make the step size smaller
			stpmax=tree->io->lnsearch_stepmax*0.5*MAX(sqrt(sum),(double)n);
		}

		// The new Lk() evaluation occurs in lnsrch; save the lnL value in fp for the next
		// line search.  It is usually safe to ignore the value of check.
		lnsrch(tree,fp,g,xi,pnew,&fret,stpmax,&check,func);
		fp = fret;

		// Update the line direction (xi = pnew - current point) and move
		// the current point to pnew.
		pc = 0;
#ifdef ENABLE_MBL
		// Bug fix: guard on opt_bl, to match the non-MBL path and the
		// slot layout produced by Copy_Tree_To_Array().
		if (tree->mod->s_opt->opt_bl)
		{
			For(i, 2*tree->n_otu-3)
			{	For(j, n_l)
				{
					xi[pc] = pnew[pc] - tree->t_edges[i]->l[j];
					tree->t_edges[i]->l[j] = pnew[pc];
					pc++;
				}
			}
		}
#endif
#ifndef ENABLE_MBL
		if (tree->mod->s_opt->opt_bl)
		{
			For(i, 2*tree->n_otu-3)
			{
				xi[pc] = pnew[pc] - tree->t_edges[i]->l;
				tree->t_edges[i]->l = pnew[pc];
				pc++;
			}
		}
#endif
		if (tree->mod->s_opt->opt_kappa)
		{
			xi[pc] = pnew[pc] - tree->mod->kappa;
			tree->mod->kappa = pnew[pc];
			pc++;
		}
		if (tree->mod->s_opt->opt_alpha)
		{
			xi[pc] = pnew[pc] - tree->mod->alpha;
			tree->mod->alpha = pnew[pc];
			pc++;
		}
		if (tree->mod->s_opt->opt_rr)
		{
			For(i, tree->mod->n_diff_rr)
			{
				xi[pc] = pnew[pc] - tree->mod->rr_val[i];
				tree->mod->rr_val[i] = pnew[pc];
				pc++;
			}
		}
#ifdef ENABLE_MBL
		if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
		{
			phydbl wtsum = 0.0;
			For(j, n_l - 1) //10.12.2010
			{	xi[pc] = pnew[pc] - tree->mod->bl_mixture_wts[j];
				tree->mod->bl_mixture_wts[j] = pnew[pc];
				wtsum += tree->mod->bl_mixture_wts[j];
				pc++;
			}
			tree->mod->bl_mixture_wts[n_l-1] = 1.0 - wtsum;
		}
#endif

		// Test for convergence on delta-x: "test" becomes the largest
		// relative parameter change.
		test=0.0;
		pc = 0;
#ifdef ENABLE_MBL
		if (tree->mod->s_opt->opt_bl)
		{
			For(i, 2*tree->n_otu-3)
			{	For(j, n_l)
				{
					temp=fabs(xi[pc])/MAX(fabs(tree->t_edges[i]->l[j]),1.0);
					pc++;
					if (temp > test) test=temp;
				}
			}
		}
#endif
#ifndef ENABLE_MBL
		if (tree->mod->s_opt->opt_bl)
		{
			For(i, 2*tree->n_otu-3)
			{
				temp=fabs(xi[pc])/MAX(fabs(tree->t_edges[i]->l),1.0);
				pc++;
				if (temp > test) test=temp;
			}
		}
#endif
		if (tree->mod->s_opt->opt_kappa)
		{
			temp=fabs(xi[pc])/MAX(fabs(tree->mod->kappa),1.0);
			pc++;
			if (temp > test) test=temp;
		}
		if (tree->mod->s_opt->opt_alpha)
		{
			temp=fabs(xi[pc])/MAX(fabs(tree->mod->alpha),1.0);
			pc++;
			if (temp > test) test=temp;
		}
		if (tree->mod->s_opt->opt_rr)
		{
			For(i, tree->mod->n_diff_rr)
			{
				temp = fabs(xi[pc])/MAX(fabs(tree->mod->rr_val[i]), 1.0);
				pc++;
				if (temp > test) test = temp;
			}
		}
#ifdef ENABLE_MBL
		if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
		{	For(j,n_l - 1) //10.12.2010
			{	temp=fabs(xi[pc])/MAX(fabs(tree->mod->bl_mixture_wts[j]),1.0);
				pc++;
				if (temp > test) test=temp;
			}
		}
#endif

		// Have we converged on delta-x?
		if ( (test < TOLX) && (its > 1) )
		{
			if (tree->c_lnL < best_lnl)
			{
				Restore_Br_Len(best_bl, tree);
				Record_Model(best_model,tree->mod);
			    tree->mod->update_eigen = 1;
				Lk(tree);
			}
		    tree->mod->update_eigen = 1;

			(*func)(tree);
			For(i,n) free(hessin[i]);
			free(hessin);
			free(xi);
			free(pnew);
			free(hdg);
			free(g);
			free(dg);
			Free(best_bl);
			Free_Model_Complete(best_model);

			// (A dead "if (its == 0) *failed = 1;" block was removed
			// here: the enclosing condition requires its > 1.)
			PhyML_Printf("\n. BFGS found an optimum, iteration %d\n", its);

			return;
		}

		for (i=0;i<n;i++) dg[i]=g[i]; // Save the old gradient

		(*dfunc)(tree,step_size,func,g,n); // Get the new gradient

		// Test for convergence on zero gradient.  At the end of this
		// block, "test" equals the largest scaled gradient component.
		test=0.0;
		den=MAX(fret,1.0);
		pc = 0;
#ifdef ENABLE_MBL
		if (tree->mod->s_opt->opt_bl)
		{
			For(i, 2*tree->n_otu-3)
			{	For(j, n_l)
				{
					temp=fabs(g[pc])*MAX(fabs(tree->t_edges[i]->l[j]),1.0)/den; //10.16.2010 - should the MAX function call be replaced with just the p[i] value (the BL in this case) ?
					pc++;
					if (temp > test) test=temp;
				}
			}
		}
#endif /* ENABLE_MBL — fixed: tokens after #endif are non-standard */
#ifndef ENABLE_MBL
		if (tree->mod->s_opt->opt_bl)
		{
			For(i, 2*tree->n_otu-3)
			{
				temp=fabs(g[pc])*MAX(fabs(tree->t_edges[i]->l),1.0)/den;
				pc++;
				if (temp > test) test=temp;
			}
		}
#endif
		if (tree->mod->s_opt->opt_kappa)
		{
			temp=fabs(g[pc])*MAX(fabs(tree->mod->kappa),1.0)/den;
			pc++;
			if (temp > test) test=temp;
		}
		if (tree->mod->s_opt->opt_alpha)
		{
			temp=fabs(g[pc])*MAX(fabs(tree->mod->alpha),1.0)/den;
			pc++;
			if (temp > test) test=temp;
		}
		if (tree->mod->s_opt->opt_rr)
		{
			For(i, tree->mod->n_diff_rr)
			{
				temp=fabs(g[pc])*MAX(fabs(tree->mod->rr_val[i]),1.0)/den;
				pc++;
				if (temp > test) test=temp;
			}
		}
#ifdef ENABLE_MBL
		if (n_l > 1 && tree->mod->s_opt->opt_bl_mixtures)
		{	For(j,n_l - 1) //10.12.2010
			{	temp=fabs(g[pc])*MAX(fabs(tree->mod->bl_mixture_wts[j]),1.0)/den;
				pc++;
				if (temp > test) test=temp;
			}
		}
#endif

		// test = the steepest gradient of all gradients
		if (test < gtol)
		{
			if (tree->c_lnL < best_lnl)
			{
				Restore_Br_Len(best_bl, tree);
				Record_Model(best_model,tree->mod);
			    tree->mod->update_eigen = 1;
				Lk(tree);
			}

		    tree->mod->update_eigen = 1;
			(*func)(tree);

			For(i,n) Free(hessin[i]);
			free(hessin);
			free(xi);
			free(pnew);
			free(hdg);
			free(g);
			free(dg);
			Free(best_bl);
			Free_Model_Complete(best_model);
			return;
		}

		for (i=0;i<n;i++) dg[i]=g[i]-dg[i]; // Compute difference of gradients

		for (i=0;i<n;i++) // Compute the difference times current matrix
		{	hdg[i]=0.0;
			for (j=0;j<n;j++) hdg[i] += hessin[i][j]*dg[j];
		}

		fac=fae=sumdg=sumxi=0.0; // Calculate dot products for the denominators
		for (i=0;i<n;i++)
		{
			fac += dg[i]*xi[i];
			fae += dg[i]*hdg[i];
			sumdg += SQR(dg[i]);
			sumxi += SQR(xi[i]);
		}
		if(fac*fac > EPS*sumdg*sumxi) // Skip the update if fac is not sufficiently positive
		{
			fac=1.0/fac;
			fad=1.0/fae;
			// The following vector makes BFGS different from DFP:
			for (i=0;i<n;i++) dg[i]=fac*xi[i]-fad*hdg[i];
			for (i=0;i<n;i++) // The BFGS updating formula:
			{
				for (j=0;j<n;j++)
				{
					hessin[i][j] += fac*xi[i]*xi[j]
												 -fad*hdg[i]*hdg[j]+fae*dg[i]*dg[j];
				}
			}
		}

		for (i=0;i<n;i++) // Now calculate the next direction to go:
		{
			xi[i]=0.0;
			for (j=0;j<n;j++)
			{	xi[i] -= hessin[i][j]*g[j];
			}
		}

	} // and go back for another iteration

	// Iteration budget exhausted without convergence.
	PhyML_Printf("\n. WARNING, BFGS ran %d iterations without finding an optimum.  Consider re-running your analysis using the option '--bfgs_itmax' with a larger value.\n", tree->io->bfgs_itmax);

	if (tree->c_lnL < best_lnl)
	{
		// Move the best solution into the current tree
		Restore_Br_Len(best_bl, tree);
		Record_Model(best_model,tree->mod);
	    tree->mod->update_eigen = 1;
		Lk(tree);
	}

	*failed = 1;

	For(i,n) free(hessin[i]);
	free(hessin);
	free(xi);
	free(pnew);
	free(hdg);
	free(g);
	free(dg);
	Free(best_bl);
	Free_Model_Complete(best_model);
}

// Walk the likelihood surface along a single branch length (the branch
// selected by tree->io->fix_param), printing lnL at n_steps_walk points
// below and above the current length, spaced stepsize_walk apart.
// In non-MBL builds, when opt_algorithm == 1 ("multimax") every other
// free parameter is re-optimized with BFGS at each point before the
// likelihood is reported; opt_algorithm == 0 ("unimax") just evaluates.
// The tree is restored to its original branch lengths before returning.
void Print_Lk_Landscape_Uni(t_tree *tree)
{

	PhyML_Printf("\n. debug bfgs.c 1303 - Printing the  likelihood landscape.");
	PhyML_Printf("\n. stepsize_walk = %f", tree->io->stepsize_walk);
	PhyML_Printf("\n. n_steps_walk = %d", tree->io->n_steps_walk);

	// Snapshot of all branch lengths, used to reset the tree between
	// walk points (non-MBL path only).
	// NOTE(review): in MBL builds each edge stores n_l lengths — confirm
	// this buffer would need to be n_l times larger if the MBL path ever
	// used Record_Br_Len/Restore_Br_Len.
	phydbl *init_lens = (phydbl *)mCalloc(2*tree->n_otu-3,sizeof(phydbl));

	int i,j,k;
	phydbl init_len;    // the branch's original length
	phydbl min_len;     // lower end of the walk
	phydbl this_len;    // NOTE: shadowed by a local of the same name below
	phydbl init_lnl;    // lnL at the original length (baseline for deltas)
	i = tree->io->fix_param;  // index of the branch being walked
#ifdef ENABLE_MBL
	  // Walk each mixture class of the chosen branch in turn.
	  For(k, n_l)
	  {
		  // walk smaller: jump below the window, then step back up.
		  init_len = tree->t_edges[i]->l[k];
		  tree->t_edges[i]->l[k] -= tree->io->stepsize_walk * (tree->io->n_steps_walk+1);
		  if (tree->t_edges[i]->l[k] < 0.0)
		  {	tree->t_edges[i]->l[k] = 0.0;
		  }
		  For(j, tree->io->n_steps_walk)
		  {
			  tree->t_edges[i]->l[k] += tree->io->stepsize_walk;
			  if( tree->t_edges[i]->l[k] >= init_len )
			  {	break;
			  }
			  Lk(tree);
			  PhyML_Printf("\n. bfgs.c 1303 branch %d l= %f lnl= %f", i, -1*tree->io->stepsize_walk*(j+1), tree->c_lnL);
		  }

		  // print the current value
		  tree->t_edges[i]->l[k] = init_len;
		  Lk(tree);
		  PhyML_Printf("\n. bfgs.c 1303 branch %d *l= 0.0 lnl= %f (length = %f)", i, tree->c_lnL, init_len);

		  // walk bigger
		  j = 0;
		  For(j, tree->io->n_steps_walk)
		  {
			  Lk(tree);
			  tree->t_edges[i]->l[k] += tree->io->stepsize_walk;
			  Lk(tree);
			  PhyML_Printf("\n. bfgs.c 1303 branch %d l= %f lnl= %f", i, tree->io->stepsize_walk*(j+1), tree->c_lnL);
		  }
		  // restore before moving on to the next mixture class
		  tree->t_edges[i]->l[k] = init_len;
	  }
#else

	  Record_Br_Len(init_lens, tree);
	  init_len = tree->t_edges[i]->l;
	  min_len = tree->t_edges[i]->l - (tree->io->stepsize_walk * (tree->io->n_steps_walk));

	  // test the current value
	  // For unimax, just print the lk:
	  if (tree->io->opt_algorithm == 0)
	  {
		  Lk(tree);
	  }
	  // For Multimax, optimize all other free params
	  else if (tree->io->opt_algorithm == 1)
	  {
		  int failed;
		  BFGS_Full(tree,
					1.e-3,
					tree->io->bfgs_stepsize,
					&Return_Abs_Lk,
					&Num_Derivative,
					&Lnsrch_Generic,
					&failed);
		  Lk(tree);
	  }
	  init_lnl = tree->c_lnL;
	  PhyML_Printf("\n. bfgs.c 1303 branch %d *l= 0.0 lnl= 0.00 (length = %f) (init_lnl = %f )\n", i, init_len, init_lnl);


	  // walk smaller: evaluate at min_len, min_len+step, ... up to (but
	  // not including) the original length.
	  For(j, tree->io->n_steps_walk)
	  {
		  Restore_Br_Len(init_lens, tree);
		  Lk(tree);
		  tree->t_edges[i]->l = min_len + j*tree->io->stepsize_walk;
		  phydbl this_len = tree->t_edges[i]->l;  // shadows the outer this_len
		  if (this_len < 0.0)
		  {
			  // negative lengths are skipped, not clamped
			  continue;
		  }
		  if( tree->t_edges[i]->l >= init_len )
		  {
			  break;
		  }

		  // For unimax, just print the lk:
		  if (tree->io->opt_algorithm == 0)
		  {
			  Lk(tree);
		  }
		  // For Multimax, optimize all other free params
		  else if (tree->io->opt_algorithm == 1)
		  {
			  int failed;
			  BFGS_Full(tree,
						1.e-3,
						tree->io->bfgs_stepsize,
						&Return_Abs_Lk,
						&Num_Derivative,
						&Lnsrch_Generic,
						&failed);
			  Lk(tree);
		  }
		  // report offset from init_len and lnL delta from the baseline
		  PhyML_Printf("\n. bfgs.c 1303 branch %d l= %.3f lnl= %f\n", i, -1*(init_len - this_len), tree->c_lnL - init_lnl);
	  }

	  // walk bigger: evaluate at init_len+step, init_len+2*step, ...
	  For(j, tree->io->n_steps_walk)
	  {
		  Restore_Br_Len(init_lens, tree);
		  Lk(tree);
		  tree->t_edges[i]->l = init_len + (j+1)*tree->io->stepsize_walk;
		  // For unimax, just print the lk:
		  if (tree->io->opt_algorithm == 0)
		  {
			  Lk(tree);
		  }
		  else if (tree->io->opt_algorithm == 1)
		  {
			  int failed;
			  BFGS_Full(tree,
						1.e-3,
						tree->io->bfgs_stepsize,
						&Return_Abs_Lk,
						&Num_Derivative,
						&Lnsrch_Generic,
						&failed);
			  Lk(tree);
		  }
		  PhyML_Printf("\n. bfgs.c 1303 branch %d l= %f lnl= %f\n", i, tree->io->stepsize_walk*(j+1), tree->c_lnL - init_lnl);
	  }
	  // leave the tree exactly as we found it
	  Restore_Br_Len(init_lens, tree);
	  Lk(tree);
#endif

	free(init_lens);
}


#undef EPS
#undef TOLX
#undef STPMX
