/*
namespace Crap
{
	/// <summary>
	/// Minimization algorithms taken from Numerical Recipes in C++.  Altered for C#.  Put into a class for optimal memory management on repeated usage.
	/// Drives a Levenberg-Marquardt style descent on a caller-supplied energy function (Func).
	/// NOTE(review): unlike stock NR mrqmin, MrqCof here treats Func's return value directly as the
	/// per-row energy contribution (no X/Y/Sd data vectors are consumed) -- confirm against callers.
	/// </summary>
	public class EnergyMin
	{
		/// <summary>
		/// For conditioning SVD in MrqMin():  holds the largest singular value, then is scaled down to the cutoff threshold.
		/// </summary>
		private float svd_wmax;
		/// <summary>
		/// Used as working space during iterations.  On finish, gives the covariance matrix of noise for the free parameters.
		/// </summary>
		public float[,] Covar;
		/// <summary>
		/// Used as working space during iterations.  On finish, gives the curvature matrix of free parameters at the solution point.
		/// </summary>
		public float[,] Alpha;
		/// <summary>
		/// Right-hand-side vector of the linearized normal equations; reused across iterations.
		/// </summary>
		public float[] Beta;
		/// <summary>
		/// Used as working space during iterations in MrqCof.
		/// </summary>
		public float[] Dyda, atry;
		//	Scalar scratch reused across iterations to avoid reallocation.
		private float chiTry, ymod, wt, dy, chiImprovement;
		//	Vector/matrix scratch: trial step, SVD buffers, singular values.
		private float[] da, onedaTemp, onedaTempOut, wVec;
		private float[,] Vmatrix, oneda;
		/// <summary>
		/// Allocates the internal memory necessary for execution (if necessary).  Called locally in Run().
		/// Reallocates only when the requested size exceeds the current allocation, so repeated runs reuse buffers.
		/// </summary>
		/// <param name="nParams">Total number of parameters (free plus fixed); sizes all working arrays.</param>
		public void InternalMalloc(int nParams)
		{
			if(atry==null  ||  nParams > atry.Length)
			{
				atry=new float[nParams];
				Beta=new float[nParams];
				da=new float[nParams];

				Alpha = new float[nParams, nParams];
				Covar = new float[nParams, nParams];
				Dyda = new float[nParams];

				//	These were supposed to be Nfit length instead of nParams.  nParams is always longer or equal to Nfit.
				//	I changed this so that static memory was always determined by nParams, and Nfit has nothing to do with it.
				oneda = new float[nParams,1];
				onedaTemp = new float[nParams];
				onedaTempOut = new float[nParams];
				Vmatrix = new float[nParams,nParams];
				wVec = new float[nParams];
			}
		}
		/// <summary>
		/// The length of the data set, or the number of significant elements in X, Y, and Sd.
		/// </summary>
		public int Ndata;
		/// <summary>
		/// An improvement in Chisq of this small a value has no meaning.  Thus, iteration stops when improvements are this small.
		/// </summary>
		public float ChisqErrTolerance;
		/// <summary>
		/// This delegates a function that calculates the energy for a given set of parameters, a, at data row x.
		/// This function also gives the derivative of energy with respect to changes in a (written into dyda).
		/// </summary>
		public delegate float MrqFunc(int x, float[] a, float[] dyda);
		/// <summary>
		/// The model which takes, as arguments, a set of free parameters in order to generate predictions of the model.
		/// </summary>
		public MrqFunc Func;
		/// <summary>
		/// The boolean vector that determines whether a particular free parameter (represented by A) needs to be optimized.
		/// If this is false, the parameter remains constant during the optimization procedure.
		/// </summary>
		public bool[] Op;
		/// <summary>
		/// The free parameters for the model.
		/// </summary>
		public float[] A;
		/// <summary>
		/// Increases with salience of the second derivatives.  Also doubles as the stop flag: MrqMin exits when Lamda reaches exactly 0.
		/// </summary>
		public float Lamda;
		/// <summary>
		/// The value of chi-squared for the set of free parameters (A), or Energy, or Whatever.
		/// </summary>
		public float Chisq;
		//	The number of true values in the Op vector.  Updated in Init().
		public int Nfit;
		/// <summary>
		/// Counts the number of iterations through MrqMin();
		/// </summary>
		public int Iterations;
		/// <summary>
		/// Stores the problem description, sizes working memory, and counts the free parameters (Nfit).
		/// Does not start the fit; call Run() for that.
		/// </summary>
		public void Init(int Ndata, MrqFunc Func, float[] A, bool[] Op, float ChisqErrTolerance)
		{
			//	Set the member variables accordingly.
			this.Func = Func;
			this.A = A;
			this.Op = Op;
			this.ChisqErrTolerance = ChisqErrTolerance;
			this.Ndata = Ndata;

			//	Insure that static memory is properly allocated.
			InternalMalloc(A.Length);

			//	Compute Nfit;
			Nfit = 0;
			for(int i=0; i<Op.Length; i++)
				if(Op[i])
					Nfit++;
		}
		/// <summary>
		/// Convenience overload:  Init() followed by Run().
		/// </summary>
		public void Run(int Ndata, MrqFunc Func, float[] A, bool[] Op, float ChisqErrTolerance)
		{
			Init(Ndata, Func, A, Op, ChisqErrTolerance);
			Run();
		}
		/// <summary>
		/// Executes the fit using the state established by Init():  computes the initial curvature/gradient,
		/// then iterates MrqMin() until convergence.  On return, A holds the optimized parameters.
		/// </summary>
		public void Run()
		{
			//	Set Lamda initially low.  Conduct the first fit.
			Lamda=1e-2f;
			//	First pass, MrqCof
			Iterations=0;
			Chisq = MrqCof(A, Alpha, Beta);
			//	Copy the values to atry, the vector of free parameters to try.
			A.CopyTo(atry, 0);
			//	Go
			MrqMin();
		}
		/// <summary>
		/// Prepares a Levenberg-Marquardt fit of the data provided (equivalent to Init()).
		/// Call Run() afterwards to execute the fit; on output, the parameters A are changed to their optimal values.
		/// </summary>
		/// <param name="Ndata">int  The length of the data set, such as the number of rows in your spreadsheet.</param>
		/// <param name="Func">MrqFunc  Input only.  The model that generates the energy for a data row given the free parameters.</param>
		/// <param name="A">float[]  Input and output.  First guess at free parameters.  This becomes the member variable A.  These values are altered.</param>
		/// <param name="Op">bool[]  Input only.  This becomes the member variable Op.  This is not altered.</param>
		/// <param name="ChisqErrTolerance">float greater than zero.
		/// An improvement in Chisq of this small a value has no meaning.  Thus, iteration stops when improvements are this small.</param>
		public EnergyMin(int Ndata, MrqFunc Func, float[] A, bool[] Op, float ChisqErrTolerance)
		{
			Init(Ndata, Func, A, Op, ChisqErrTolerance);
		}
				
		/// <summary>
		/// Used by MrqMin to evaluate the linearized fitting matrix "Alpha", vector "Beta".
		/// Only the lower triangle is accumulated; the upper triangle is mirrored at the end.
		/// </summary>
		/// <param name="a">Full parameter vector (free and fixed) to evaluate at.</param>
		/// <param name="alpha">Output Nfit-by-Nfit curvature matrix.</param>
		/// <param name="beta">Output Nfit-length gradient-like vector.</param>
		/// <returns>The value of energy for this set of free parameters.</returns>
		public float MrqCof(float[] a, float[,] alpha, float[] beta)
		{
			int i,j,k,l,m;
			float chisq=0.0f;
					
			for (j=0;j<Nfit;j++)
			{
				for (k=0;k<=j;k++)
					alpha[j,k]=0.0f;
				beta[j]=0.0f;
			}
			for (i=0;i<Ndata;i++)
			{
				ymod=Func(i,a,Dyda);
				//	NOTE(review): dy = -sqrt(energy) is an alteration from stock NR; NaN results if Func returns a negative value -- confirm Func is non-negative.
				dy=-(float)Math.Sqrt(ymod);
				for (j=0,l=0;l<a.Length;l++)
				{
					if (Op[l])
					{
						wt=Dyda[l];
						//	Accumulate the lower triangle over free parameters only (j,k index the compacted free set).
						for(k=0,m=0;m<l+1;m++)
							if(Op[m])
								alpha[j,k++] += wt*Dyda[m];
						beta[j++] += dy*wt;
					}
				}
				chisq += ymod;
			}
			//	Mirror the lower triangle into the upper triangle (alpha is symmetric).
			for(j=1;j<Nfit;j++)
				for(k=0;k<j;k++)
					alpha[k,j]=alpha[j,k];
			return chisq;
		}
		/// <summary>
		/// Expand in storage the covariance matrix "covar" to take into account the fixed parameters.
		/// Fix: the original code called a CovSrt overload taking bool[] that did not exist anywhere in
		/// this file (only EnergyMin2.CovSrt(Bool2) exists), so MrqMin() could not compile.  This mirrors
		/// that algorithm for a flat bool[] mask, with the element swaps done inline.
		/// </summary>
		/// <param name="covar">Covariance matrix over the mfit free parameters; expanded in place to ma-by-ma.</param>
		/// <param name="ma">Total number of parameters (free plus fixed).</param>
		/// <param name="ia">Mask marking which parameters were fit.</param>
		/// <param name="mfit">Number of free (fitted) parameters.</param>
		private static void CovSrt(float[,] covar, int ma, bool[] ia, int mfit)
		{
			int i,j,k;
			float tmp;
			//	Zero everything outside the top-left mfit-by-mfit block that holds the fitted covariances.
			for(i=mfit;i<ma;i++)
				for(j=0;j<i+1;j++)
					covar[i,j]=covar[j,i]=0.0f;
			k=mfit-1;
			//	Walk parameters from the end, swapping fitted rows/columns out to their true positions.
			for (j=ma-1;j>=0;j--)
			{
				if (ia[j])
				{
					for (i=0;i<ma;i++)
					{
						tmp=covar[i,k]; covar[i,k]=covar[i,j]; covar[i,j]=tmp;
					}
					for (i=0;i<ma;i++)
					{
						tmp=covar[k,i]; covar[k,i]=covar[j,i]; covar[j,i]=tmp;
					}
					k--;
				}
			}
		}
		/// <summary>
		/// Drives the minimization routine.  Loops until Lamda is forced to exactly 0 (by convergence
		/// within ChisqErrTolerance, or by Lamda exceeding 1e9), then unpacks the covariance matrices and returns.
		/// </summary>
		public void MrqMin()
		{
			while(true)
			{
				Iterations++;
				int j,k,l;

				//	Build the augmented normal equations:  Covar = Alpha with the diagonal inflated by (1+Lamda).
				for(j=0;j<Nfit;j++)
				{
					for(k=0;k<Nfit;k++)
					{
						Covar[j,k]=Alpha[j,k];
					}
					Covar[j,j]=Alpha[j,j]*(1.0f+Lamda);
					oneda[j,0]=Beta[j];
				}

				//	The following try/catch loop is an edition by Dave Ing.  The original algorithm is just "GaussJordan.Run(Covar,Nfit,oneda,1);"
				//	Use GaussJordan to invert the covariance matrix as fast as possible in addition to computing the optimal step.
				try
				{
					GaussJordanFloat.Run(Covar,Nfit,oneda,1);
				}
					//	Singularity, use SVD instead.
				catch
				{
					//	Rebuild the augmented system (GaussJordan modified Covar/oneda in place before failing).
					for(j=0;j<Nfit;j++)
					{
						for(k=0;k<Nfit;k++)
						{
							Covar[j,k]=Alpha[j,k];
						}
						Covar[j,j]=Alpha[j,j]*(1.0f+Lamda);
						onedaTemp[j]=oneda[j,0]=Beta[j];
					}
					SvdFloat.Decomp(Covar, Nfit, Nfit, wVec, Vmatrix);
					//	Zero singular values below 1/50 of the largest to condition the solve.
					svd_wmax=0.0f;
					for(j=0; j<Nfit; j++)
						if(wVec[j]>svd_wmax)
							svd_wmax = wVec[j];
					svd_wmax/=50.0f;
					for(j=0; j<Nfit; j++)
						if(wVec[j]<svd_wmax)
							wVec[j]=0.0f;
					SvdFloat.Backsub(Covar, Nfit, Nfit, wVec, Vmatrix, onedaTemp, onedaTempOut);
					for(j=0; j<Nfit; j++)
						oneda[j,0] = onedaTempOut[j];
				}
					
				for (j=0;j<Nfit;j++)
					da[j]=oneda[j,0];
				//	Lamda==0 is the exact sentinel assigned below, so float equality is safe here.
				if (Lamda == 0.0)
				{
					CovSrt(Covar,A.Length,Op,Nfit);
					CovSrt(Alpha,A.Length,Op,Nfit);
					return;
				}
				//	Apply the trial step to the free parameters only.
				for (j=0,l=0;l<A.Length;l++)
					if (Op[l])
						atry[l]=A[l]+da[j++];
				chiTry=MrqCof(atry, Covar, da);
				if (chiTry < Chisq)
				{
					//	Decrease the diagonal dominance of the curvature matrix (salience of the second derivatives).
					Lamda *= 0.1f;
					chiImprovement=Chisq-chiTry;
					Chisq=chiTry;
					//	Accept the step:  promote the trial curvature/gradient and parameters.
					for (j=0;j<Nfit;j++)
					{
						for (k=0;k<Nfit;k++)
							Alpha[j,k]=Covar[j,k];
						Beta[j]=da[j];
					}
					for (l=0;l<A.Length;l++) A[l]=atry[l];
					//	Stopping criteria:  Is the Chisq improvement less than the ErrTolerance parameter.
					if(chiImprovement < ChisqErrTolerance)
						Lamda=0.0f;	//	stop algorithm on next run.
				}
				else
				{
					//	Increase the diagonal dominance of the curvature matrix (salience of the second derivatives).
					if(Lamda>1e5f)
						Lamda *= 1e5f;
					else if(Lamda<1e-2f)
						Lamda *= 10000.0f;
					else
						Lamda *= 10.0f;
					if(Lamda>1e9f)
						Lamda=0.0f;	//	Exit
				}
			}
		}
		/// <summary>
		/// Parameterless construction; call Init()/Run() before use.
		/// </summary>
		public EnergyMin(){}
	}
	/// <summary>
	/// Thin wrapper around a rectangular float[,] that adds flat (row-major) single-index access and copy helpers.
	/// </summary>
	public class Float2
	{
		//	Backing 2-D storage; rows/cols cache its dimensions.
		private float[,] m;
		/// <summary>
		/// The raw 2-D array.  Assigning a new array re-derives Rows and Cols from it (no copy is made).
		/// </summary>
		public float[,] M
		{
			get
			{
				return m;
			}
			set
			{
				m = value;
				rows = m.GetLength(0);
				cols = m.GetLength(1);
			}
		}
		private int rows, cols;
		//	Fix: the original kept loop/scratch indices (_i, _j) as instance fields shared by every
		//	accessor, making even read-only calls non-thread-safe and re-entrancy-unsafe.
		//	All methods below use locals instead; the external interface is unchanged.
		/// <summary>
		/// Allocates a new rows-by-cols matrix (all elements zero).
		/// </summary>
		public Float2(int rows, int cols)
		{
			this.rows = rows;
			this.cols = cols;
			m = new float[rows, cols];
		}
		/// <summary>
		/// Wraps an existing array without copying it.
		/// </summary>
		public Float2(float[,] M)
		{
			m = M;
			rows = m.GetLength(0);
			cols = m.GetLength(1);
		}
		/// <summary>
		/// 2-D element access.
		/// </summary>
		public float this[int i, int j]
		{
			get
			{
				return m[i, j];
			}
			set
			{
				m[i, j] = value;
			}
		}
		/// <summary>
		/// Sets the element at flat row-major index i (row = i / Cols, col = i % Cols).
		/// </summary>
		public void SetValue(float val, int i)
		{
			int row = i / cols;
			m[row, i - row * cols] = val;
		}
		/// <summary>
		/// Sets the element at row i, column j.
		/// </summary>
		public void SetValue(float val, int i, int j)
		{
			m[i, j] = val;
		}
		/// <summary>
		/// Gets the element at flat row-major index i (row = i / Cols, col = i % Cols).
		/// </summary>
		public float GetValue(int i)
		{
			int row = i / cols;
			return m[row, i - row * cols];
		}
		/// <summary>
		/// Gets the element at row i, column j.
		/// </summary>
		public float GetValue(int i, int j)
		{
			return m[i, j];
		}
		/// <summary>
		/// Total element count (Rows * Cols).
		/// </summary>
		public int Length
		{
			get
			{
				return m.Length;
			}
		}
		/// <summary>
		/// Copies every element into dest's backing array.  dest must be at least Rows-by-Cols.
		/// </summary>
		public void CopyTo(Float2 dest)
		{
			float[,] d = dest.M;
			for (int i = 0; i < rows; i++)
				for (int j = 0; j < cols; j++)
					d[i, j] = m[i, j];
		}
		/// <summary>
		/// Copies every element into dest.  dest must be at least Rows-by-Cols.
		/// </summary>
		public void CopyTo(float[,] dest)
		{
			for (int i = 0; i < rows; i++)
				for (int j = 0; j < cols; j++)
					dest[i, j] = m[i, j];
		}
		/// <summary>
		/// Number of rows in the wrapped array.
		/// </summary>
		public int Rows
		{
			get
			{
				return rows;
			}
		}
		/// <summary>
		/// Number of columns in the wrapped array.
		/// </summary>
		public int Cols
		{
			get
			{
				return cols;
			}
		}
	}
	/// <summary>
	/// Thin wrapper around a rectangular bool[,] that adds flat (row-major) single-index access and copy helpers.
	/// </summary>
	public class Bool2
	{
		//	Backing 2-D storage; rows/cols cache its dimensions.
		private bool[,] m;
		/// <summary>
		/// The raw 2-D array.  Assigning a new array re-derives Rows and Cols from it (no copy is made).
		/// </summary>
		public bool[,] M
		{
			get
			{
				return m;
			}
			set
			{
				m = value;
				rows = m.GetLength(0);
				cols = m.GetLength(1);
			}
		}
		private int rows, cols;
		//	Fix: the original kept loop/scratch indices (_i, _j) as instance fields shared by every
		//	accessor, making even read-only calls non-thread-safe and re-entrancy-unsafe.
		//	All methods below use locals instead; the external interface is unchanged.
		/// <summary>
		/// Allocates a new rows-by-cols matrix (all elements false).
		/// </summary>
		public Bool2(int rows, int cols)
		{
			this.rows = rows;
			this.cols = cols;
			m = new bool[rows, cols];
		}
		/// <summary>
		/// Wraps an existing array without copying it.
		/// </summary>
		public Bool2(bool[,] M)
		{
			m = M;
			rows = m.GetLength(0);
			cols = m.GetLength(1);
		}
		/// <summary>
		/// 2-D element access.
		/// </summary>
		public bool this[int i, int j]
		{
			get
			{
				return m[i, j];
			}
			set
			{
				m[i, j] = value;
			}
		}
		/// <summary>
		/// Sets the element at flat row-major index i (row = i / Cols, col = i % Cols).
		/// </summary>
		public void SetValue(bool val, int i)
		{
			int row = i / cols;
			m[row, i - row * cols] = val;
		}
		/// <summary>
		/// Sets the element at row i, column j.
		/// </summary>
		public void SetValue(bool val, int i, int j)
		{
			m[i, j] = val;
		}
		/// <summary>
		/// Gets the element at flat row-major index i (row = i / Cols, col = i % Cols).
		/// </summary>
		public bool GetValue(int i)
		{
			int row = i / cols;
			return m[row, i - row * cols];
		}
		/// <summary>
		/// Gets the element at row i, column j.
		/// </summary>
		public bool GetValue(int i, int j)
		{
			return m[i, j];
		}
		/// <summary>
		/// Total element count (Rows * Cols).
		/// </summary>
		public int Length
		{
			get
			{
				return m.Length;
			}
		}
		/// <summary>
		/// Copies every element into dest's backing array.  dest must be at least Rows-by-Cols.
		/// </summary>
		public void CopyTo(Bool2 dest)
		{
			bool[,] d = dest.M;
			for (int i = 0; i < rows; i++)
				for (int j = 0; j < cols; j++)
					d[i, j] = m[i, j];
		}
		/// <summary>
		/// Copies every element into dest.  dest must be at least Rows-by-Cols.
		/// </summary>
		public void CopyTo(bool[,] dest)
		{
			for (int i = 0; i < rows; i++)
				for (int j = 0; j < cols; j++)
					dest[i, j] = m[i, j];
		}
		/// <summary>
		/// Number of rows in the wrapped array.
		/// </summary>
		public int Rows
		{
			get
			{
				return rows;
			}
		}
		/// <summary>
		/// Number of columns in the wrapped array.
		/// </summary>
		public int Cols
		{
			get
			{
				return cols;
			}
		}
	}
	/// <summary>
	/// Callback signature used by EnergyMin2.OnSuccess; invoked after each iteration that reduces Chisq.
	/// </summary>
	public delegate void UpdatesSuccess();
	/// <summary>
	/// Minimization algorithms taken from Numerical Recipes in C++.  Altered for C#.  Put into a class for optimal memory management on repeated usage.
	/// Variant of EnergyMin whose parameter set and free/fixed mask are 2-D (Float2/Bool2) and which reports
	/// progress through the Pulser/OnSuccess delegates.
	/// </summary>
	public class EnergyMin2
	{
		/// <summary>
		/// Expand in storage the covariance matrix "covar" to take into account the fixed parameters.
		/// </summary>
		/// <param name="covar">float[,]  Covariance matrix over the mfit free parameters; expanded in place to ma-by-ma.</param>
		/// <param name="ma">Total number of parameters (free plus fixed).</param>
		/// <param name="ia">Mask (flat row-major indexing) marking which parameters were fit.</param>
		/// <param name="mfit">Number of free (fitted) parameters.</param>
		public static void CovSrt(float[,] covar, int ma, Bool2 ia, int mfit)
		{
			int i,j,k;
			//	Zero everything outside the top-left mfit-by-mfit block that holds the fitted covariances.
			for(i=mfit;i<ma;i++)
				for(j=0;j<i+1;j++)
					covar[i,j]=covar[j,i]=0.0f;
			k=mfit-1;
			//	Walk parameters from the end, swapping fitted rows/columns out to their true positions.
			for (j=ma-1;j>=0;j--)
			{
				if (ia.GetValue(j))
				{
					for (i=0;i<ma;i++)
						Swap.Run(ref covar[i,k],ref covar[i,j]);
					for (i=0;i<ma;i++)
						Swap.Run(ref covar[k,i],ref covar[j,i]);
					k--;
				}
			}
		}
		
		/// <summary>
		/// For conditioning SVD in MrqMin():  holds the largest singular value, then is scaled down to the cutoff threshold.
		/// </summary>
		private float svd_wmax;
		/// <summary>
		/// Used as working space during iterations.  On finish, gives the covariance matrix of noise for the free parameters.
		/// </summary>
		public float[,] Covar;
		/// <summary>
		/// Used as working space during iterations.  On finish, gives the curvature matrix of free parameters at the solution point.
		/// </summary>
		public float[,] Alpha;
		/// <summary>
		/// Right-hand-side vector of the linearized normal equations; reused across iterations.
		/// </summary>
		public float[] Beta;
		/// <summary>
		/// Used as working space during iterations in MrqCof.
		/// </summary>
		public Float2 Dyda, atry;
		//	Scalar scratch reused across iterations to avoid reallocation.
		private float chiTry, ymod, wt, dy, chiImprovement;
		//	Vector/matrix scratch: trial step, SVD buffers, singular values.
		private float[] da, onedaTemp, onedaTempOut, wVec;
		private float[,] Vmatrix, oneda;
		/// <summary>
		/// Allocates the internal memory necessary for execution (if necessary).  Called locally in Run().
		/// Reallocates only when the requested size exceeds the current allocation, so repeated runs reuse buffers.
		/// NOTE(review): atry and Dyda are sized from the member A, so A must be assigned before this is
		/// called -- Init() does so; confirm before calling directly.
		/// </summary>
		/// <param name="nParams">Total number of parameters (free plus fixed); sizes all working arrays.</param>
		public void InternalMalloc(int nParams)
		{
			if(atry==null  ||  nParams > atry.Length)
			{
				atry=new Float2(A.Rows, A.Cols);
				Beta=new float[nParams];
				da=new float[nParams];

				Alpha = new float[nParams, nParams];
				Covar = new float[nParams, nParams];
				Dyda = new Float2(A.Rows, A.Cols);

				//	These were supposed to be Nfit length instead of nParams.  nParams is always longer or equal to Nfit.
				//	I changed this so that static memory was always determined by nParams, and Nfit has nothing to do with it.
				oneda = new float[nParams,1];
				onedaTemp = new float[nParams];
				onedaTempOut = new float[nParams];
				Vmatrix = new float[nParams,nParams];
				wVec = new float[nParams];
			}
		}
		/// <summary>
		/// The length of the data set, or the number of significant elements in X, Y, and Sd.
		/// </summary>
		public int Ndata;
		/// <summary>
		/// An improvement in Chisq of this small a value has no meaning.  Thus, iteration stops when improvements are this small.
		/// </summary>
		public float ChisqErrTolerance;
		/// <summary>
		/// This delegates a function that calculates the energy for a given set of parameters, a, at data row x.
		/// This function also gives the derivative of energy with respect to changes in a (written into dyda).
		/// </summary>
		public delegate float MrqFunc(int x, Float2 a, Float2 dyda);
		/// <summary>
		/// The model which takes, as arguments, a set of free parameters in order to generate predictions of the model.
		/// </summary>
		public MrqFunc Func;
		public delegate void PulseFunc();
		/// <summary>
		/// Sends a pulse each time the F matrix is optimized once.  May be null (no pulses are sent).
		/// </summary>
		public PulseFunc Pulser = null;
		/// <summary>
		/// Called on a successful update (an iteration that reduced Chisq).  May be null.
		/// </summary>
		public UpdatesSuccess OnSuccess;
		/// <summary>
		/// The boolean vector that determines whether a particular free parameter (represented by A) needs to be optimized.
		/// If this is false, the parameter remains constant during the optimization procedure.
		/// </summary>
		public Bool2 Op;
		/// <summary>
		/// The free parameters for the model.
		/// </summary>
		public Float2 A;
		/// <summary>
		/// Increases with salience of the second derivatives.  Also doubles as the stop flag: MrqMin exits when Lamda reaches exactly 0.
		/// </summary>
		public float Lamda;
		/// <summary>
		/// The value of chi-squared for the set of free parameters (A), or Energy, or Whatever.
		/// </summary>
		public float Chisq;
		//	The number of true values in the Op vector.  Updated in Init().
		public int Nfit;
		/// <summary>
		/// Counts the number of iterations through MrqMin();
		/// </summary>
		public int Iterations;
		/// <summary>
		/// Stores the problem description, sizes working memory, and counts the free parameters (Nfit).
		/// Does not start the fit; call Run() for that.
		/// </summary>
		public void Init(int Ndata, MrqFunc Func, Float2 A, Bool2 Op, float ChisqErrTolerance)
		{
			//	Set the member variables accordingly.
			this.Func = Func;
			this.A = A;
			this.Op = Op;
			this.ChisqErrTolerance = ChisqErrTolerance;
			this.Ndata = Ndata;

			//	Insure that static memory is properly allocated.
			InternalMalloc(A.Length);

			//	Compute Nfit;
			Nfit = 0;
			for(int i=0; i<Op.Length; i++)
				if(Op.GetValue(i))
					Nfit++;
		}
		/// <summary>
		/// Convenience overload:  Init() followed by Run().
		/// </summary>
		public void Run(int Ndata, MrqFunc Func, Float2 A, Bool2 Op, float ChisqErrTolerance)
		{
			Init(Ndata, Func, A, Op, ChisqErrTolerance);
			Run();
		}
		/// <summary>
		/// Executes the fit using the state established by Init():  computes the initial curvature/gradient,
		/// then iterates MrqMin() until convergence.  On return, A holds the optimized parameters.
		/// </summary>
		public void Run()
		{
			//	Set Lamda initially low.  Conduct the first fit.				
			Lamda=1e-2f;
			//	First pass, MrqCof
			Iterations=0;
			Chisq = MrqCof(A, Alpha, Beta);
			//	Send a pulse after the first run through.
			if(Pulser!=null)
				Pulser();
			//	Copy the values to atry, the vector of free parameters to try.
			A.CopyTo(atry);
			//	Go
			MrqMin();
		}
		/// <summary>
		/// Prepares a Levenberg-Marquardt fit of the data provided (equivalent to Init()).
		/// Call Run() afterwards to execute the fit; on output, the parameters A are changed to their optimal values.
		/// </summary>
		/// <param name="Ndata">int  The length of the data set, such as the number of rows in your spreadsheet.</param>
		/// <param name="Func">MrqFunc  Input only.  The model that generates the energy for a data row given the free parameters.</param>
		/// <param name="A">Float2  Input and output.  First guess at free parameters.  This becomes the member variable A.  These values are altered.</param>
		/// <param name="Op">Bool2  Input only.  This becomes the member variable Op.  This is not altered.</param>
		/// <param name="ChisqErrTolerance">float greater than zero.
		/// An improvement in Chisq of this small a value has no meaning.  Thus, iteration stops when improvements are this small.</param>
		public EnergyMin2(int Ndata, MrqFunc Func, Float2 A, Bool2 Op, float ChisqErrTolerance)
		{
			Init(Ndata, Func, A, Op, ChisqErrTolerance);
		}
				
		/// <summary>
		/// Prepares a Levenberg-Marquardt fit of the data provided (equivalent to Init()), with progress delegates.
		/// Call Run() afterwards to execute the fit; on output, the parameters A are changed to their optimal values.
		/// </summary>
		/// <param name="Ndata">int  The length of the data set, such as the number of rows in your spreadsheet.</param>
		/// <param name="Func">MrqFunc  Input only.  The model that generates the energy for a data row given the free parameters.</param>
		/// <param name="A">Float2  Input and output.  First guess at free parameters.  This becomes the member variable A.  These values are altered.</param>
		/// <param name="Op">Bool2  Input only.  This becomes the member variable Op.  This is not altered.</param>
		/// <param name="ChisqErrTolerance">float greater than zero.
		/// An improvement in Chisq of this small a value has no meaning.  Thus, iteration stops when improvements are this small.</param>
		/// <param name="Pulser">Called after every iteration (success or failure); may be null.</param>
		/// <param name="OnSuccess">Called after each iteration that reduces Chisq; may be null.</param>
		public EnergyMin2(int Ndata, MrqFunc Func, Float2 A, Bool2 Op, float ChisqErrTolerance, 
			PulseFunc Pulser, UpdatesSuccess OnSuccess)
		{
			this.Pulser = Pulser;
			this.OnSuccess = OnSuccess;
			Init(Ndata, Func, A, Op, ChisqErrTolerance);
		}
				
		/// <summary>
		/// Used by MrqMin to evaluate the linearized fitting matrix "Alpha", vector "Beta".
		/// Only the lower triangle is accumulated; the upper triangle is mirrored at the end.
		/// </summary>
		/// <param name="a">Full parameter set (free and fixed, flat row-major indexing) to evaluate at.</param>
		/// <param name="alpha">Output Nfit-by-Nfit curvature matrix.</param>
		/// <param name="beta">Output Nfit-length gradient-like vector.</param>
		/// <returns>The value of energy for this set of free parameters.</returns>
		public float MrqCof(Float2 a, float[,] alpha, float[] beta)
		{
			int i,j,k,l,m;
			float chisq=0.0f;
					
			for (j=0;j<Nfit;j++)
			{
				for (k=0;k<=j;k++)
					alpha[j,k]=0.0f;
				beta[j]=0.0f;
			}
			for (i=0;i<Ndata;i++)
			{
				ymod=Func(i,a,Dyda);
				//	NOTE(review): dy = -sqrt(energy) is an alteration from stock NR; NaN results if Func returns a negative value -- confirm Func is non-negative.
				dy=-(float)Math.Sqrt(ymod);
				for (j=0,l=0;l<a.Length;l++)
				{
					if (Op.GetValue(l))
					{
						wt=Dyda.GetValue(l);
						//	Accumulate the lower triangle over free parameters only (j,k index the compacted free set).
						for(k=0,m=0;m<l+1;m++)
							if(Op.GetValue(m))
								alpha[j,k++] += wt*Dyda.GetValue(m);
						beta[j++] += dy*wt;
					}
				}
				chisq += ymod;
			}
			//	Mirror the lower triangle into the upper triangle (alpha is symmetric).
			for(j=1;j<Nfit;j++)
				for(k=0;k<j;k++)
					alpha[k,j]=alpha[j,k];
			return chisq;
		}
		/// <summary>
		/// Drives the minimization routine.  Loops until Lamda is forced to exactly 0 (by convergence
		/// within ChisqErrTolerance, or by Lamda exceeding 1e9), then unpacks the covariance matrices and returns.
		/// </summary>
		public void MrqMin()
		{
			while(true)
			{
				Iterations++;
				int j,k,l;

				//	Build the augmented normal equations:  Covar = Alpha with the diagonal inflated by (1+Lamda).
				for(j=0;j<Nfit;j++)
				{
					for(k=0;k<Nfit;k++)
					{
						Covar[j,k]=Alpha[j,k];
					}
					Covar[j,j]=Alpha[j,j]*(1.0f+Lamda);
					oneda[j,0]=Beta[j];
				}

				//	The following try/catch loop is an edition by Dave Ing.  The original algorithm is just "GaussJordan.Run(Covar,Nfit,oneda,1);"
				//	Use GaussJordan to invert the covariance matrix as fast as possible in addition to computing the optimal step.
				try
				{
					GaussJordanFloat.Run(Covar,Nfit,oneda,1);
				}
					//	Singularity, use SVD instead.
				catch
				{
					//	Rebuild the augmented system (GaussJordan modified Covar/oneda in place before failing).
					for(j=0;j<Nfit;j++)
					{
						for(k=0;k<Nfit;k++)
						{
							Covar[j,k]=Alpha[j,k];
						}
						Covar[j,j]=Alpha[j,j]*(1.0f+Lamda);
						onedaTemp[j]=oneda[j,0]=Beta[j];
					}
							
					SvdFloat.Decomp(Covar, Nfit, Nfit, wVec, Vmatrix);
					//	Zero singular values below 1/50 of the largest to condition the solve.
					svd_wmax=0.0f;
					for(j=0; j<Nfit; j++)
						if(wVec[j]>svd_wmax)
							svd_wmax = wVec[j];
					svd_wmax/=50.0f;
					for(j=0; j<Nfit; j++)
						if(wVec[j]<svd_wmax)
							wVec[j]=0.0f;
					SvdFloat.Backsub(Covar, Nfit, Nfit, wVec, Vmatrix, onedaTemp, onedaTempOut);
					for(j=0; j<Nfit; j++)
						oneda[j,0] = onedaTempOut[j];
				}
					
				for (j=0;j<Nfit;j++)
					da[j]=oneda[j,0];
				//	Lamda==0 is the exact sentinel assigned below, so float equality is safe here.
				if (Lamda == 0.0)
				{
					CovSrt(Covar,A.Length,Op,Nfit);
					CovSrt(Alpha,A.Length,Op,Nfit);
					return;
				}
				//	Apply the trial step to the free parameters only.
				for (j=0,l=0;l<A.Length;l++)
					if (Op.GetValue(l))
						atry.SetValue(A.GetValue(l)+da[j++],l);
				chiTry=MrqCof(atry, Covar, da);
				if (chiTry < Chisq)
				{
					//	Decrease the diagonal dominance of the curvature matrix (salience of the second derivatives).
					Lamda *= 0.1f;
					chiImprovement=Chisq-chiTry;
					Chisq=chiTry;
					//	Accept the step:  promote the trial curvature/gradient and parameters.
					for (j=0;j<Nfit;j++)
					{
						for (k=0;k<Nfit;k++)
							Alpha[j,k]=Covar[j,k];
						Beta[j]=da[j];
					}
					for (l=0;l<A.Length;l++) A.SetValue(atry.GetValue(l),l);
					//	Stopping criteria:  Is the Chisq improvement less than the ErrTolerance parameter.
					if(chiImprovement < ChisqErrTolerance)
						Lamda=0.0f;	//	stop algorithm on next run.
					//	Success Delegate
					if(OnSuccess!=null)
						OnSuccess();
					//	Pulse Delegate
					if(Pulser!=null)
						Pulser();
				}
				else
				{
					//	Increase the diagonal dominance of the curvature matrix (salience of the second derivatives).
					//	Fix: the original tested Lamda>1e2f before Lamda>1e4f, making the 1e4 branch
					//	unreachable; test the larger threshold first so the growth rate actually escalates.
					if(Lamda>1e4f)
						Lamda *= 1e4f;
					else if(Lamda>1e2f)
						Lamda *= 1e2f;
					else if(Lamda<1e-2f)
						Lamda *= 10000.0f;
					else
						Lamda *= 10.0f;
					if(Lamda>1e9f)
						Lamda=0.0f;	//	Exit
					//	Pulse Delegate
					if(Pulser!=null)
						Pulser();
				}
			}
		}
		/// <summary>
		/// Parameterless construction; call Init()/Run() before use.
		/// </summary>
		public EnergyMin2() {}
	}
}
*/