package optimization.naturalOptimization.evolutionaryAlgorithm.evolutionStrategy.cma;

import java.util.Arrays;
import java.util.Comparator;
import java.util.Properties;

import optimization.naturalOptimization.NaturalOptimization;
import optimization.randomNumberGenerator.RandomNumberGenerator;

/**
 * @see #samplePopulation()
 * @see #updateDistribution(double[])
 * @author Nikolaus Hansen, 1996, 2003, 2005, 2007
 */
public class CMAEvolutionStrategy implements java.io.Serializable {
	/** serialization version marker */
	private static final long serialVersionUID = 2918241407634253526L;

	/**
	 * version of this CMA-ES implementation
	 */
	// fix: string literal instead of the redundant `new String(...)` constructor
	public final String versionNumber = "0.99.40";

	/**
	 * Guards against numerical degeneration of the sampling distribution:
	 * detects a flat fitness landscape (and increases the step-size sigma) and
	 * renormalizes the scale of C/diagD/pc into sigma when the axis lengths
	 * become extreme.
	 */
	void testAndCorrectNumerics() { // not much left here
		boolean flat = Boolean.valueOf(getParentOptimization().getProperties()
				.getProperty("testFlatFitness", "true"));
		/* Flat Fitness, Test if function values are identical */
		if ((getCountIter() > 1 || (getCountIter() == 1 && state >= 3)) && flat) {
			// compare the best fitness with the one just above the median rank
			if (fit.fitness[0].getValue() == fit.fitness[Math.min(
					sp.getLambda() - 1, sp.getLambda() / 2 + 1) - 1].getValue()) {
				warning("flat fitness landscape, consider reformulation of fitness, step-size increased");
				sigma *= Math.exp(0.2 + sp.getCs() / sp.getDamps());
			}
		}

		/* Align (renormalize) scale C (and consequently sigma) */
		/*
		 * e.g. for infinite stationary state simulations (noise handling needs
		 * to be introduced for that)
		 */
		double fac = 1;
		if (math.max(diagD) < 1e-6)
			fac = 1. / math.max(diagD);
		else if (math.min(diagD) > 1e4)
			fac = 1. / math.min(diagD);

		if (fac != 1.) {
			// the effective axis lengths sigma * diagD stay invariant: fac is
			// multiplied into diagD/pc/C and divided out of sigma
			sigma /= fac;
			for (int i = 0; i < N; ++i) {
				pc[i] *= fac;
				diagD[i] *= fac;
				for (int j = 0; j <= i; ++j)
					C[i][j] *= fac * fac;
			}
		}
	} // Test...

	/**
	 * options that can be changed (fields can be assigned) at any time to
	 * control the running behavior
	 * */
	private CMAOptions options = new CMAOptions();

	/**
	 * strategy parameters that can be set initially
	 * */
	private CMAParameters sp = new CMAParameters(); // alias for inside use

	/** problem dimension, number of variables */
	int N;
	RandomNumberGenerator rand; // Note: it also Serializable

	final MyMath math = new MyMath();
	/** ratio between the longest and shortest axis of the distribution */
	double axisratio;
	long counteval; // number of function evaluations so far
	long countiter; // number of iterations (generations) so far

	long bestever_eval; // evaluation count when bestever_x was found
	double[] bestever_x;
	double bestever_fit = Double.NaN;
	// CMASolution bestever; // used as output variable

	double sigma = 0.0; // overall step-size
	double[] typicalX; // eventually used to set initialX
	double[] initialX; // set in the end of init()
	double[] LBound, UBound; // bounds
	double[] xmean; // current distribution mean
	double xmean_fit = Double.NaN;
	double[] pc; // evolution path for covariance matrix adaptation
	double[] ps; // evolution path for step-size adaptation
	double[][] C; // covariance matrix; only i >= j entries are maintained
	double maxsqrtdiagC;
	double minsqrtdiagC;
	double[][] B; // eigenvector basis of C (columns)
	double[] diagD; // square roots of the eigenvalues of C
	boolean flgdiag; // 0 == full covariance matrix

	/* init information */
	double[] startsigma;
	double maxstartsigma;
	double minstartsigma;

	boolean iniphase;

	private NaturalOptimization parentOptimization;

	/**
	 * state (postconditions): -1 not yet initialized 0 initialized init() 0.5
	 * reSizePopulation 1 samplePopulation, sampleSingle, reSampleSingle 2.5
	 * updateSingle 3 updateDistribution
	 */
	double state = -1;
	long citerlastwritten = 0;
	long countwritten = 0;
	int lockDimension = 0;
	int mode = 0;
	final int SINGLE_MODE = 1; // not in use anymore, keep for later
								// developements?
	final int PARALLEL_MODE = 2;

	long countCupdatesSinceEigenupdate;

	protected FitnessCollector fit = new FitnessCollector();

	double recentFunctionValue;
	double recentMaxFunctionValue;
	double recentMinFunctionValue;
	int idxRecentOffspring;

	double[][] arx; // sampled search points (genotypes)
	/** recent population, no idea whether this is useful to be public */
	public double[][] population; // returned not as a copy
	double[] xold; // previous distribution mean

	double[] BDz;
	double[] artmp; // scratch buffer of length N

	// fix: string literal instead of the redundant `new String(...)` constructor
	String propertiesFileName = "CMAEvolutionStrategy.properties";

	/**
	 * postpones most initialization. For initialization use setInitial...
	 * methods or set up a properties file, see file
	 * "CMAEvolutionStrategy.properties".
	 */
	public CMAEvolutionStrategy() {
		state = -1; // not yet initialized, see field state
	}

	/**
	 * @param dimension
	 *            search space dimension, dimension of the objective functions
	 *            preimage, number of variables
	 */
	public CMAEvolutionStrategy(int dimension) {
		setDimension(dimension);
		sp = sp.getDefaults(dimension); // default strategy parameters for N
		state = -1;
	}

	/**
	 * @param dimension
	 *            search space dimension, dimension of the objective functions
	 *            preimage, number of variables
	 * @param lambda
	 *            offspring size
	 */
	public CMAEvolutionStrategy(int dimension, int lambda) {
		setDimension(dimension);
		sp = sp.getDefaults(dimension, lambda); // defaults for given N, lambda
		state = -1;
	}

	/**
	 * initialization providing all mandatory input arguments at once. The
	 * following two is equivalent
	 * 
	 * <PRE>
	 * cma.init(N, X, SD);
	 * </PRE>
	 * 
	 * and
	 * 
	 * <PRE>
	 * cma.setInitialX(X); //
	 * cma.setInitialStandardDeviations(SD);
	 * cma.init(N);
	 * </PRE>
	 * 
	 * The call to <code>init</code> is a point of no return for parameter
	 * settings, and demands all mandatory input be set. <code>init</code> then
	 * forces the setting up of everything and calls
	 * <code>parameters.supplementRemainders()</code>. If <code>init</code> was
	 * not called before, it is called once in <code>samplePopulation()</code>.
	 * The return value is only provided for sake of convenience.
	 * 
	 * @param dimension
	 * @param initialX
	 *            double[] can be of size one, where all variables are set to
	 *            the same value, or of size dimension
	 * @param initialStandardDeviations
	 *            can be of size one, where all standard deviations are set to
	 *            the same value, or of size dimension
	 * 
	 * @return <code>double[] fitness</code> of length population size lambda to
	 *         assign and pass objective function values to
	 *         <code>{@link #updateDistribution(double[])}</code>
	 * 
	 * @see #init()
	 * @see #init(int)
	 * @see #setInitialX(double[])
	 * @see #setTypicalX(double[])
	 * @see #setInitialStandardDeviations(double[])
	 * @see #samplePopulation()
	 * @see CMAParameters#supplementRemainders(int, CMAOptions)
	 */
	public double[] init(int dimension, double[] initialX,
			double[] initialStandardDeviations) {
		// record the mandatory inputs, then run the common initialization
		setInitialX(initialX);
		setInitialStandardDeviations(initialStandardDeviations);
		return init(dimension);
	}

	/**
	 * @param x
	 *            value to replicate
	 * @param dim
	 *            length of the returned array
	 * @return a new <code>double[dim]</code> with every entry set to x
	 */
	private double[] getArrayOf(double x, int dim) {
		double[] res = new double[dim];
		Arrays.fill(res, x); // stdlib fill instead of a manual loop
		return res;
	}

	/**
	 * 
	 * @param x
	 *            null or x.length==1 or x.length==dim, only for the second case
	 *            x is expanded
	 * @param dim
	 * @return <code>null</code> or <code>double[] x</code> with
	 *         <code>x.length==dim</code>; the input array is returned
	 *         unchanged (not copied) when it already has length dim
	 */
	private double[] expandToDimension(double[] x, int dim) {
		if (x == null)
			return null;
		if (x.length == dim)
			return x;
		if (x.length != 1)
			error("x must have length one or length dimension");
		// NOTE(review): error() only reports to the parent optimization and
		// returns, so execution falls through to the expansion below

		return getArrayOf(x[0], dim);
	}

	/**
	 * @param dimension
	 *            search space dimension
	 * @return empty array of length lambda, see {@link #init()}
	 * @see #init(int, double[], double[])
	 * */
	public double[] init(int dimension) {
		setDimension(dimension);
		return init();
	}

	/**
	 * sets up all internal data structures (bounds, sigma/diagD, xmean, paths,
	 * B, C, fitness buffers); a point of no return for parameter settings.
	 * 
	 * @return empty array of length lambda, to be filled with objective
	 *         function values
	 * @see #init(int, double[], double[])
	 * */
	public double[] init() {
		int i;
		if (N <= 0)
			error("dimension needs to be determined, use eg. setDimension() or setInitialX()");
		if (state >= 0)
			error("init() cannot be called twice");
		if (state == 0) // less safe variant; reachable since error() returns
			return new double[sp.getLambda()];
		if (state > 0)
			error("init() cannot be called after the first population was sampled");

		if (sp.supplemented == 0) // a bit a hack
			sp.supplementRemainders(N, options);
		sp.locked = 1; // lambda cannot be changed anymore

		diagD = new double[N];
		for (i = 0; i < N; ++i)
			diagD[i] = 1;
		/* expand Boundaries */
		LBound = expandToDimension(LBound, N);
		if (LBound == null) {
			// no lower bounds given: unconstrained from below
			LBound = new double[N];
			for (i = 0; i < N; ++i)
				LBound[i] = Double.NEGATIVE_INFINITY;
		}

		UBound = expandToDimension(UBound, N);
		if (UBound == null) {
			// no upper bounds given: unconstrained from above
			UBound = new double[N];
			for (i = 0; i < N; ++i)
				UBound[i] = Double.POSITIVE_INFINITY;
		}

		/* Initialization of sigmas */
		if (startsigma != null) { //
			if (startsigma.length == 1) {
				sigma = startsigma[0];
			} else if (startsigma.length == N) {
				// sigma carries the largest start deviation; diagD carries the
				// per-coordinate ratios relative to sigma
				sigma = math.max(startsigma);
				if (sigma <= 0)
					error("initial standard deviation sigma must be positive");
				for (i = 0; i < N; ++i) {
					diagD[i] = startsigma[i] / sigma;
				}
			} else
				assert false;
		} else {
			// we might use boundaries here to find startsigma, but I prefer to
			// have stddevs mandatory
			error("no initial standard deviation specified, use setInitialStandardDeviations()");
			sigma = 0.5;
		}

		if (sigma <= 0 || math.min(diagD) <= 0) {
			error("initial standard deviations not specified or non-positive, "
					+ "use setInitialStandarddeviations()");
			sigma = 1;
		}
		/* save initial standard deviation */
		if (startsigma == null || startsigma.length == 1) {
			startsigma = new double[N];
			for (i = 0; i < N; ++i) {
				startsigma[i] = sigma * diagD[i];
			}
		}
		maxstartsigma = math.max(startsigma);
		minstartsigma = math.min(startsigma);
		axisratio = maxstartsigma / minstartsigma; // axis parallel distribution

		/* expand typicalX, might still be null afterwards */
		typicalX = expandToDimension(typicalX, N);

		/* Initialization of xmean */
		xmean = expandToDimension(xmean, N);
		if (xmean == null) {
			/* set via typicalX */
			if (typicalX != null) {
				xmean = typicalX.clone();
				for (i = 0; i < N; ++i)
					xmean[i] += sigma * diagD[i] * rand.nextGaussian();
				/* set via boundaries, is deprecated */
			} else if (math.max(UBound) < Double.MAX_VALUE
					&& math.min(LBound) > -Double.MAX_VALUE) {
				error("no initial search point (solution) X or typical X specified");
				xmean = new double[N];
				for (i = 0; i < N; ++i) {
					// place xmean uniformly within the bounds, keeping at
					// least one standard deviation away from each bound
					double offset = sigma * diagD[i];
					double range = (UBound[i] - LBound[i] - 2 * sigma
							* diagD[i]);
					if (offset > 0.4 * (UBound[i] - LBound[i])) {
						offset = 0.4 * (UBound[i] - LBound[i]);
						range = 0.2 * (UBound[i] - LBound[i]);
					}
					xmean[i] = LBound[i] + offset + rand.nextDouble() * range;
				}
			} else {
				error("no initial search point (solution) X or typical X specified");
				xmean = new double[N];
				for (i = 0; i < N; ++i)
					xmean[i] = rand.nextDouble();
			}
		}

		assert xmean != null;
		assert sigma > 0;

		/* interpret missing option value */
		if (options.diagonalCovarianceMatrix < 0) // necessary for hello world
													// message
			options.diagonalCovarianceMatrix = 1 * 150 * N / sp.lambda; // cave:
																		// duplication
																		// below

		/* non-settable parameters */
		pc = new double[N];
		ps = new double[N];
		B = new double[N][N];
		C = new double[N][N]; // essentially only i <= j part is used

		xold = new double[N];
		BDz = new double[N];
		bestever_x = xmean.clone();
		// bestever = new CMASolution(xmean);
		artmp = new double[N];

		fit.deltaFitHist = new double[5];
		fit.idxDeltaFitHist = -1;
		for (i = 0; i < fit.deltaFitHist.length; ++i)
			fit.deltaFitHist[i] = 1.;

		// code to be duplicated in reSizeLambda
		fit.fitness = new IntDouble[sp.getLambda()]; // including penalties,
														// used yet
		fit.raw = new IntDouble[sp.getLambda()]; // raw function values
		fit.history = new double[10 + 30 * N / sp.getLambda()];

		arx = new double[sp.getLambda()][N];
		population = new double[sp.getLambda()][N];

		for (i = 0; i < sp.getLambda(); ++i) {
			fit.fitness[i] = new IntDouble();
			fit.raw[i] = new IntDouble();
		}

		// initialization: zero paths, B identity, C diagonal from diagD
		for (i = 0; i < N; ++i) {
			pc[i] = 0;
			ps[i] = 0;
			for (int j = 0; j < N; ++j) {
				B[i][j] = 0;
			}
			for (int j = 0; j < i; ++j) {
				C[i][j] = 0;
			}
			B[i][i] = 1;
			C[i][i] = diagD[i] * diagD[i];
		}
		maxsqrtdiagC = Math.sqrt(math.max(math.diag(C)));
		minsqrtdiagC = Math.sqrt(math.min(math.diag(C)));
		countCupdatesSinceEigenupdate = 0;
		iniphase = false; // obsolete

		/* Some consistency check */
		for (i = 0; i < N; ++i) {
			if (LBound[i] > UBound[i])
				error("lower bound is greater than upper bound");
			if (typicalX != null) {
				if (LBound[i] > typicalX[i])
					error("lower bound '" + LBound[i]
							+ "'is greater than typicalX" + typicalX[i]);
				if (UBound[i] < typicalX[i])
					error("upper bound '" + UBound[i]
							+ "' is smaller than typicalX " + typicalX[i]);
			}
		}
		initialX = xmean.clone(); // keep finally chosen initialX

		timings.start = System.currentTimeMillis();
		timings.starteigen = System.currentTimeMillis();

		state = 0;

		return new double[sp.getLambda()];

	} // init()

	/**
	 * get default parameters in new CMAParameters instance, dimension must have
	 * been set before calling getDefaults
	 * 
	 * @return default parameters for the current dimension N
	 * 
	 * @see CMAParameters#getDefaults(int)
	 */
	public CMAParameters getParameterDefaults() {
		return sp.getDefaults(N);
	}

	/**
	 * get default parameters in new CMAParameters instance
	 * 
	 * @param N
	 *            dimensionality (shadows the field of the same name)
	 * @return Parameters
	 * 
	 * @see CMAParameters#getDefaults(int)
	 */
	public CMAParameters getParameterDefaults(int N) {
		return sp.getDefaults(N);
	}

	/** configuration properties; presumably backed by propertiesFileName — not read within this chunk */
	Properties properties = new Properties();

	// private void infoVerbose(String s) {
	// println(" CMA-ES info: " + s);
	// }

	/** forwards a non-fatal diagnostic message to the parent optimization */
	private void warning(String s) {
		parentOptimization.pushMessage(" CMA-ES warning: " + s);
	}

	/**
	 * reports an error condition to the parent optimization. NOTE: despite the
	 * name this does NOT terminate or throw — callers continue executing.
	 */
	private void error(String s) { // somehow a relict from the C history of
									// this code
		parentOptimization.pushError(new CMAException(" CMA-ES error: " + s));
		// System.exit(-1);
	}

	/** some simple math utilities */
	class MyMath { // implements java.io.Serializable {
		int itest;

		/** @return d squared */
		double square(double d) {
			return d * d;
		}

		/** @return product of all entries of ar (1.0 for an empty array) */
		double prod(double[] ar) {
			double res = 1.0;
			for (int i = 0; i < ar.length; ++i)
				res *= ar[i];
			return res;
		}

		/**
		 * @param ar
		 *            array of double values; not modified (a copy is sorted)
		 * @return median of ar
		 */
		public double median(double ar[]) {
			// sort a copy so the caller's array stays untouched
			double[] ar2 = Arrays.copyOf(ar, ar.length);
			Arrays.sort(ar2);
			if (ar2.length % 2 == 0)
				return (ar2[ar2.length / 2] + ar2[ar2.length / 2 - 1]) / 2.;
			else
				return ar2[ar2.length / 2];
		}

		/**
		 * @param ar
		 *            array of double values
		 * @return Maximum value of 1-D double array
		 */
		public double max(double ar[]) {
			double m = ar[0];
			for (int i = 1; i < ar.length; ++i) {
				if (m < ar[i])
					m = ar[i];
			}
			return m;
		}

		/**
		 * sqrt(a^2 + b^2) without under/overflow.
		 * 
		 * @param a
		 * @param b
		 * @return sqrt(a^2 + b^2)
		 **/
		public double hypot(double a, double b) {
			double r = 0;
			if (Math.abs(a) > Math.abs(b)) {
				r = b / a;
				r = Math.abs(a) * Math.sqrt(1 + r * r);
			} else if (b != 0) {
				r = a / b;
				r = Math.abs(b) * Math.sqrt(1 + r * r);
			}
			return r;
		}

		/**
		 * @param ar
		 * @return index of minimum value of 1-D double array
		 */
		public int minidx(double ar[]) {
			return minidx(ar, ar.length - 1);
		}

		/**
		 * @return index of minimum value of 1-D double array between index 0
		 *         and maxidx
		 * @param ar
		 *            double[]
		 * @param maxidx
		 *            last index to be considered (inclusive)
		 */
		public int minidx(double[] ar, int maxidx) {
			int idx = 0;
			// bug fix: the loop previously ran i < maxidx, so the element at
			// index maxidx itself was never considered, contradicting the
			// documented meaning "last index to be considered"
			for (int i = 1; i <= maxidx; ++i) {
				if (ar[idx] > ar[i])
					idx = i;
			}
			return idx;
		}

		/**
		 * @return index of minimum value of 1-D IntDouble array between index 0
		 *         and maxidx
		 * @param ar
		 *            IntDouble[]
		 * @param maxidx
		 *            last index to be considered (inclusive)
		 */
		protected int minidx(IntDouble[] ar, int maxidx) {
			int idx = 0;
			// bug fix: same off-by-one as in minidx(double[], int); the
			// element at index maxidx is now included in the scan
			for (int i = 1; i <= maxidx; ++i) {
				if (ar[idx].getValue() > ar[i].getValue())
					idx = i;
			}
			return idx;
		}

		/**
		 * @param ar
		 * @return index of maximum value of 1-D double array
		 */
		public int maxidx(double ar[]) {
			int idx = 0;
			for (int i = 1; i < ar.length; ++i) {
				if (ar[idx] < ar[i])
					idx = i;
			}
			return idx;
		}

		/**
		 * @param ar
		 * @return Minimum value of 1-D double array
		 */
		public double min(double ar[]) {
			double m = ar[0];
			for (int i = 1; i < ar.length; ++i) {
				if (m > ar[i])
					m = ar[i];
			}
			return m;
		}

		/**
		 * @param ar
		 * @param c
		 * @return Maximum value of 1-D Object array where the object implements
		 *         Comparator Example: max(Double arx, arx[0])
		 */
		public Double max(Double ar[], Comparator<Double> c) {
			Double m = ar[0];
			for (int i = 1; i < ar.length; ++i) {
				// bug fix: the comparison was inverted (compare(m, ar[i]) > 0)
				// and returned the minimum; m is replaced when it is smaller
				if (c.compare(m, ar[i]) < 0)
					m = ar[i];
			}
			return m;
		}

		/**
		 * @param ar
		 * @return Maximum value of 1-D IntDouble array
		 */
		public IntDouble max(IntDouble ar[]) {
			IntDouble m = ar[0];
			for (int i = 1; i < ar.length; ++i) {
				if (m.compare(m, ar[i]) < 0)
					m = ar[i];
			}
			return m;
		}

		/**
		 * @param ar
		 * @return Minimum value of 1-D IntDouble array
		 */
		public IntDouble min(IntDouble ar[]) {
			IntDouble m = ar[0];
			for (int i = 1; i < ar.length; ++i) {
				if (m.compare(m, ar[i]) > 0)
					m = ar[i];
			}
			return m;
		}

		/**
		 * @param ar
		 * @param c
		 * @return Minimum value of 1-D Object array defining a Comparator
		 */
		public Double min(Double ar[], Comparator<Double> c) {
			Double m = ar[0];
			for (int i = 1; i < ar.length; ++i) {
				// bug fix: the comparison was inverted (compare(m, ar[i]) < 0)
				// and returned the maximum; m is replaced when it is larger
				if (c.compare(m, ar[i]) > 0)
					m = ar[i];
			}
			return m;
		}

		/**
		 * @param ar
		 * @return Diagonal of an 2-D double array
		 */
		public double[] diag(double ar[][]) {
			double[] diag = new double[ar.length];
			for (int i = 0; i < ar.length && i < ar[i].length; ++i)
				diag[i] = ar[i][i];
			return diag;
		}

		/**
		 * @param v
		 * @return 1-D double array of absolute values of an 1-D double array
		 */
		public double[] abs(double v[]) {
			double res[] = new double[v.length];
			for (int i = 0; i < v.length; ++i)
				res[i] = Math.abs(v[i]);
			return res;
		}
	} // MyMath

	/** wall-clock bookkeeping for the eigendecomposition time budget */
	class Timing {
		Timing() {
			birth = System.currentTimeMillis();
			start = birth; // on the save side
		}

		long birth; // time at construction, not really in use
		long start; // time at end of init()
		long starteigen; // time after flgdiag was turned off, ie when calls to
							// eigen() start
		long eigendecomposition = 0; // spent time in eigendecomposition
		long writedefaultfiles = 0; // spent time in writeToDefaultFiles
	}

	Timing timings = new Timing();

	/*
	 * flgforce == 1 force independent of time measurments, flgforce == 2 force
	 * independent of uptodate-status
	 */
	/**
	 * Updates B (eigenvectors) and diagD (sqrt of eigenvalues) from C, unless
	 * C is unchanged or the time budget for eigendecompositions is exhausted.
	 */
	void eigendecomposition(int flgforce) {
		/* Update B and D, calculate eigendecomposition */
		int i, j;

		// C unchanged since the last decomposition: nothing to do
		if (countCupdatesSinceEigenupdate == 0 && flgforce < 2)
			return;

		// 20% is usually better in terms of running *time* (only on fast to
		// evaluate functions)
		// skip when decomposition time exceeds its fraction of total runtime,
		// or when C has changed too little to matter
		if (!flgdiag
				&& flgforce <= 0
				&& (timings.eigendecomposition > 1000
						+ options.maxTimeFractionForEigendecomposition
						* (System.currentTimeMillis() - timings.starteigen) || countCupdatesSinceEigenupdate < 1.
						/ sp.getCcov() / N / 5.))
			return;

		if (flgdiag) {
			// diagonal case: eigenvalues are just the diagonal of C
			for (i = 0; i < N; ++i) {
				diagD[i] = Math.sqrt(C[i][i]);
			}
			countCupdatesSinceEigenupdate = 0;
			timings.starteigen = System.currentTimeMillis(); // reset starting
																// time
			timings.eigendecomposition = 0; // not really necessary
		} else {
			// set B <- C
			for (i = 0; i < N; ++i)
				for (j = 0; j <= i; ++j)
					B[i][j] = B[j][i] = C[i][j];

			// eigendecomposition
			double[] offdiag = new double[N];
			long firsttime = System.currentTimeMillis();
			tred2(N, B, diagD, offdiag);
			tql2(N, diagD, offdiag, B);
			timings.eigendecomposition += System.currentTimeMillis()
					- firsttime;

			if (options.checkEigenSystem > 0)
				checkEigenSystem(N, C, diagD, B); // for debugging

			// assign diagD to eigenvalue square roots
			for (i = 0; i < N; ++i) {
				if (diagD[i] < 0) // numerical problem?
					error("an eigenvalue has become negative");
				diagD[i] = Math.sqrt(diagD[i]);
			}
			countCupdatesSinceEigenupdate = 0;
		} // end Update B and D
		if (math.min(diagD) == 0) // error management is done elsewhere
			axisratio = Double.POSITIVE_INFINITY;
		else
			axisratio = math.max(diagD) / math.min(diagD);

	} // eigendecomposition

	/* ========================================================= */
	int checkEigenSystem(int N, double C[][], double diag[], double Q[][])
	/*
	 * exhaustive test of the output of the eigendecomposition needs O(n^3)
	 * operations
	 * 
	 * produces error returns number of detected inaccuracies
	 */
	{
		/* compute Q diag Q^T and Q Q^T to check */
		int i, j, k, res = 0;
		double cc, dd;
		String s;

		for (i = 0; i < N; ++i)
			for (j = 0; j < N; ++j) {
				// cc = (Q diag Q^T)[i][j], dd = (Q Q^T)[i][j]
				for (cc = 0., dd = 0., k = 0; k < N; ++k) {
					cc += diag[k] * Q[i][k] * Q[j][k];
					dd += Q[i][k] * Q[j][k];
				}
				/* check here, is the normalization the right one? */
				// C stores only the lower triangle, hence the index swapping
				if (Math.abs(cc - C[i > j ? i : j][i > j ? j : i])
						/ Math.sqrt(C[i][i] * C[j][j]) > 1e-10
						&& Math.abs(cc - C[i > j ? i : j][i > j ? j : i]) > 1e-9) { /*
																					 * quite
																					 * large
																					 */
					s = " " + i + " " + j + " " + cc + " "
							+ C[i > j ? i : j][i > j ? j : i] + " "
							+ (cc - C[i > j ? i : j][i > j ? j : i]);
					warning("cmaes_t:Eigen(): imprecise result detected " + s);
					++res;
				}
				// Q Q^T must be the identity (Q orthogonal)
				if (Math.abs(dd - (i == j ? 1 : 0)) > 1e-10) {
					s = i + " " + j + " " + dd;
					warning("cmaes_t:Eigen(): imprecise result detected (Q not orthog.) "
							+ s);
					++res;
				}
			}
		return res;
	}

	// Symmetric Householder reduction to tridiagonal form, taken from JAMA
	// package.

	/**
	 * Reduces the symmetric matrix V in place to tridiagonal form; on return d
	 * holds the diagonal, e the off-diagonal, and V the accumulated
	 * transformation. Do not modify — well-tested JAMA/EISPACK code.
	 */
	private void tred2(int n, double V[][], double d[], double e[]) {

		// This is derived from the Algol procedures tred2 by
		// Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
		// Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
		// Fortran subroutine in EISPACK.

		for (int j = 0; j < n; j++) {
			d[j] = V[n - 1][j];
		}

		// Householder reduction to tridiagonal form.

		for (int i = n - 1; i > 0; i--) {

			// Scale to avoid under/overflow.

			double scale = 0.0;
			double h = 0.0;
			for (int k = 0; k < i; k++) {
				scale = scale + Math.abs(d[k]);
			}
			if (scale == 0.0) {
				e[i] = d[i - 1];
				for (int j = 0; j < i; j++) {
					d[j] = V[i - 1][j];
					V[i][j] = 0.0;
					V[j][i] = 0.0;
				}
			} else {

				// Generate Householder vector.

				for (int k = 0; k < i; k++) {
					d[k] /= scale;
					h += d[k] * d[k];
				}
				double f = d[i - 1];
				double g = Math.sqrt(h);
				if (f > 0) {
					g = -g;
				}
				e[i] = scale * g;
				h = h - f * g;
				d[i - 1] = f - g;
				for (int j = 0; j < i; j++) {
					e[j] = 0.0;
				}

				// Apply similarity transformation to remaining columns.

				for (int j = 0; j < i; j++) {
					f = d[j];
					V[j][i] = f;
					g = e[j] + V[j][j] * f;
					for (int k = j + 1; k <= i - 1; k++) {
						g += V[k][j] * d[k];
						e[k] += V[k][j] * f;
					}
					e[j] = g;
				}
				f = 0.0;
				for (int j = 0; j < i; j++) {
					e[j] /= h;
					f += e[j] * d[j];
				}
				double hh = f / (h + h);
				for (int j = 0; j < i; j++) {
					e[j] -= hh * d[j];
				}
				for (int j = 0; j < i; j++) {
					f = d[j];
					g = e[j];
					for (int k = j; k <= i - 1; k++) {
						V[k][j] -= (f * e[k] + g * d[k]);
					}
					d[j] = V[i - 1][j];
					V[i][j] = 0.0;
				}
			}
			d[i] = h;
		}

		// Accumulate transformations.

		for (int i = 0; i < n - 1; i++) {
			V[n - 1][i] = V[i][i];
			V[i][i] = 1.0;
			double h = d[i + 1];
			if (h != 0.0) {
				for (int k = 0; k <= i; k++) {
					d[k] = V[k][i + 1] / h;
				}
				for (int j = 0; j <= i; j++) {
					double g = 0.0;
					for (int k = 0; k <= i; k++) {
						g += V[k][i + 1] * V[k][j];
					}
					for (int k = 0; k <= i; k++) {
						V[k][j] -= g * d[k];
					}
				}
			}
			for (int k = 0; k <= i; k++) {
				V[k][i + 1] = 0.0;
			}
		}
		for (int j = 0; j < n; j++) {
			d[j] = V[n - 1][j];
			V[n - 1][j] = 0.0;
		}
		V[n - 1][n - 1] = 1.0;
		e[0] = 0.0;
	}

	// Symmetric tridiagonal QL algorithm, taken from JAMA package.

	/**
	 * Computes eigenvalues (into d, ascending) and eigenvectors (into the
	 * columns of V) of the tridiagonal matrix produced by tred2. Do not
	 * modify — well-tested JAMA/EISPACK code.
	 */
	private void tql2(int n, double d[], double e[], double V[][]) {

		// This is derived from the Algol procedures tql2, by
		// Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
		// Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
		// Fortran subroutine in EISPACK.

		for (int i = 1; i < n; i++) {
			e[i - 1] = e[i];
		}
		e[n - 1] = 0.0;

		double f = 0.0;
		double tst1 = 0.0;
		double eps = Math.pow(2.0, -52.0);
		for (int l = 0; l < n; l++) {

			// Find small subdiagonal element

			tst1 = Math.max(tst1, Math.abs(d[l]) + Math.abs(e[l]));
			int m = l;
			while (m < n) {
				if (Math.abs(e[m]) <= eps * tst1) {
					break;
				}
				m++;
			}

			// If m == l, d[l] is an eigenvalue,
			// otherwise, iterate.

			if (m > l) {
				int iter = 0;
				do {
					iter = iter + 1; // (Could check iteration count here.)

					// Compute implicit shift

					double g = d[l];
					double p = (d[l + 1] - g) / (2.0 * e[l]);
					double r = math.hypot(p, 1.0);
					if (p < 0) {
						r = -r;
					}
					d[l] = e[l] / (p + r);
					d[l + 1] = e[l] * (p + r);
					double dl1 = d[l + 1];
					double h = g - d[l];
					for (int i = l + 2; i < n; i++) {
						d[i] -= h;
					}
					f = f + h;

					// Implicit QL transformation.

					p = d[m];
					double c = 1.0;
					double c2 = c;
					double c3 = c;
					double el1 = e[l + 1];
					double s = 0.0;
					double s2 = 0.0;
					for (int i = m - 1; i >= l; i--) {
						c3 = c2;
						c2 = c;
						s2 = s;
						g = c * e[i];
						h = c * p;
						r = math.hypot(p, e[i]);
						e[i + 1] = s * r;
						s = e[i] / r;
						c = p / r;
						p = c * d[i] - s * g;
						d[i + 1] = h + s * (c * g + s * d[i]);

						// Accumulate transformation.

						for (int k = 0; k < n; k++) {
							h = V[k][i + 1];
							V[k][i + 1] = s * V[k][i] + c * h;
							V[k][i] = c * V[k][i] - s * h;
						}
					}
					p = -s * s2 * c3 * el1 * e[l] / dl1;
					e[l] = s * p;
					d[l] = c * p;

					// Check for convergence.

				} while (Math.abs(e[l]) > eps * tst1);
			}
			d[l] = d[l] + f;
			e[l] = 0.0;
		}

		// Sort eigenvalues and corresponding vectors.

		for (int i = 0; i < n - 1; i++) {
			int k = i;
			double p = d[i];
			for (int j = i + 1; j < n; j++) {
				if (d[j] < p) { // NH find smallest k>i
					k = j;
					p = d[j];
				}
			}
			if (k != i) {
				d[k] = d[i]; // swap k and i
				d[i] = p;
				for (int j = 0; j < n; j++) {
					p = V[j][i];
					V[j][i] = V[j][k];
					V[j][k] = p;
				}
			}
		}
	} // tql2

	/**
	 * not really in use so far, just clones and copies
	 * 
	 * @param popx
	 *            genotype
	 * @param popy
	 *            phenotype, repaired; a fresh array is allocated when it is
	 *            null, aliases popx, or has the wrong length
	 * @return popy, transformed row by row
	 */
	double[][] genoPhenoTransformation(double[][] popx, double[][] popy) {
		boolean needFresh = popy == null || popy == popx
				|| popy.length != popx.length;
		double[][] result = needFresh ? new double[popx.length][] : popy;

		for (int row = 0; row < result.length; ++row)
			result[row] = genoPhenoTransformation(popx[row], result[row]);

		return result;
	}

	/**
	 * not really in use so far, just clones and copies
	 * 
	 * @param popx
	 *            genotype
	 * @param popy
	 *            phenotype, repaired; a fresh array is allocated when it is
	 *            null, aliases popx, or has the wrong length
	 * @return popy, transformed row by row
	 */
	double[][] phenoGenoTransformation(double[][] popx, double[][] popy) {
		boolean needFresh = popy == null || popy == popx
				|| popy.length != popx.length;
		double[][] result = needFresh ? new double[popx.length][] : popy;

		for (int row = 0; row < result.length; ++row)
			result[row] = phenoGenoTransformation(popx[row], result[row]);

		return result;
	}

	/**
	 * not really in use so far, just clones and copies
	 * 
	 * @param x
	 *            genotype
	 * @param y
	 *            phenotype buffer; reused only when non-null, distinct from x,
	 *            and of the same length
	 * @return y, an identity copy of x
	 */
	double[] genoPhenoTransformation(double[] x, double[] y) {
		if (y == null || y == x || y.length != x.length) {
			y = x.clone();
			return y; // for now return an identical copy
		}
		// copy the first N entries into the reusable buffer
		System.arraycopy(x, 0, y, 0, N);
		return y;
	}

	/**
	 * not really in use so far, just clones and copies
	 * 
	 * @param x
	 *            genotype
	 * @param y
	 *            phenotype buffer; reused only when non-null, distinct from x,
	 *            and of the same length
	 * @return y, an identity copy of x
	 */
	double[] phenoGenoTransformation(double[] x, double[] y) {
		if (y == null || y == x || y.length != x.length) {
			y = x.clone();
			return y; // for now return an identical copy
		}
		// copy the first N entries into the reusable buffer
		System.arraycopy(x, 0, y, 0, N);
		return y;
	}

	/**
	 * Samples the recent search distribution lambda times
	 * 
	 * @return double[][] population, lambda times dimension array of sampled
	 *         solutions, where
	 *         <code>lambda == parameters.getPopulationSize()</code>
	 * @see #updateDistribution(double[])
	 * @see CMAParameters#getPopulationSize()
	 */
	public double[][] samplePopulation() {
		int i, j, iNk;
		double sum;

		if (state < 0)
			init();
		else if (state < 3 && state > 2)
			error("mixing of calls to updateSingle() and samplePopulation() is not possible");
		else
			eigendecomposition(0); // latest possibility to generate B and diagD

		if (state != 1)
			++countiter;
		state = 1; // can be repeatedly called without problem
		idxRecentOffspring = sp.getLambda() - 1; // not really necessary at the
													// moment

		// ensure maximal and minimal standard deviations
		if (options.lowerStandardDeviations != null
				&& options.lowerStandardDeviations.length > 0)
			for (i = 0; i < N; ++i) {
				// a short options array is extended with its last entry
				double d = options.lowerStandardDeviations[Math.min(i,
						options.lowerStandardDeviations.length - 1)];
				if (d > sigma * minsqrtdiagC)
					sigma = d / minsqrtdiagC;
			}
		if (options.upperStandardDeviations != null
				&& options.upperStandardDeviations.length > 0)
			for (i = 0; i < N; ++i) {
				double d = options.upperStandardDeviations[Math.min(i,
						options.upperStandardDeviations.length - 1)];
				if (d < sigma * maxsqrtdiagC)
					sigma = d / maxsqrtdiagC;
			}

		testAndCorrectNumerics();

		/* sample the distribution */
		for (iNk = 0; iNk < sp.getLambda(); ++iNk) { /*
													 * generate scaled random
													 * vector (D * z)
													 */

			// code duplication from resampleSingle because of possible future
			// resampling before GenoPheno
			/* generate scaled random vector (D * z) */
			if (flgdiag)
				// diagonal case: no rotation by B is needed
				for (i = 0; i < N; ++i)
					arx[iNk][i] = xmean[i] + sigma * diagD[i]
							* rand.nextGaussian();
			else {
				for (i = 0; i < N; ++i)
					artmp[i] = diagD[i] * rand.nextGaussian();

				/* add mutation (sigma * B * (D*z)) */
				for (i = 0; i < N; ++i) {
					for (j = 0, sum = 0; j < N; ++j)
						sum += B[i][j] * artmp[j];
					arx[iNk][i] = xmean[i] + sigma * sum;
				}
			}
			// redo this while isOutOfBounds(arx[iNk])
		}

		// I am desperately missing a const/readonly/visible qualifier.
		return population = genoPhenoTransformation(arx, population);

	} // end samplePopulation()

	/**
	 * compute Mahalanobis norm of x - mean w.r.t. the current distribution
	 * (using covariance matrix times squared step-size for the inner product).
	 * 
	 * @param x
	 * @param mean
	 * @return Mahalanobis norm of x - mean: sqrt((x-mean)' C^-1 (x-mean)) / sigma
	 */
	public double mahalanobisNorm(double[] x, double[] mean) {
		// (x-mean)' C^-1 (x-mean) = (x-mean)' (B D^2 B')^-1 (x-mean)
		// = (D^-1 B' (x-mean))' * (D^-1 B' (x-mean)),
		// so project x - mean onto each eigenvector column of B and scale
		// the squared projection by the corresponding eigenvalue of C
		double norm2 = 0;
		for (int col = 0; col < N; ++col) {
			double proj = 0.; // col-th component of B' (x - mean)
			for (int row = 0; row < N; ++row)
				proj += B[row][col] * (x[row] - mean[row]);
			norm2 += proj * proj / diagD[col] / diagD[col];
		}
		return Math.sqrt(norm2) / sigma;
	}

	/**
	 * update of the search distribution from a population and its function
	 * values. Convenience overload that forwards to
	 * {@link #updateDistribution(double[][], double[], int)} with zero
	 * injected solutions.
	 * 
	 * @param population
	 *            double[lambda][N], lambda solutions
	 * @param functionValues
	 *            double[lambda], respective objective values of population
	 * 
	 * @see #samplePopulation()
	 * @see #updateDistribution(double[])
	 * @see #updateDistribution(double[][], double[], int)
	 */
	public void updateDistribution(double[][] population,
			double[] functionValues) {
		updateDistribution(population, functionValues, 0);
	}

	/**
	 * update of the search distribution from a population and its function
	 * values, an alternative interface for
	 * {@link #updateDistribution(double[] functionValues)}. functionValues is
	 * used to establish an ordering of the elements in population. The first
	 * nInjected elements do not need to originate from #samplePopulation() or
	 * can have been modified. Injected solutions are clipped back towards the
	 * distribution mean so their Mahalanobis distance does not exceed an
	 * upper length bound.
	 * 
	 * @param population
	 *            double[lambda][N], lambda solutions
	 * @param functionValues
	 *            double[lambda], respective objective values of population
	 * @param nInjected
	 *            int, first nInjected solutions of population were not sampled
	 *            by samplePopulation() or modified afterwards
	 * 
	 * @see #samplePopulation()
	 * @see #updateDistribution(double[])
	 */
	public void updateDistribution(double[][] population,
			double[] functionValues, int nInjected) {
		// pass first input argument; map phenotype back to genotype space
		arx = phenoGenoTransformation(population, null);
		for (int i = 0; i < nInjected; ++i) {
			// NOTE(review): this warning is emitted once per injected
			// solution, every call — presumably a reminder, not an error
			warning("TODO: checking of injected solution has not yet been tested");
			// if (mahalanobisNorm(arx[0], xmean) > Math.sqrt(N) + 2) //
			// testing: seems fine
			// System.out.println(mahalanobisNorm(arx[i], xmean)/Math.sqrt(N));
			double upperLength = Math.sqrt(N) + 2. * N / (N + 2.); // should
																	// become an
																	// interfaced
																	// parameter?
			// shrink the injected solution towards xmean when its
			// Mahalanobis norm exceeds upperLength (fac < 1)
			double fac = upperLength / mahalanobisNorm(arx[i], xmean);
			if (fac < 1)
				for (int j = 0; j < N; ++j)
					arx[i][j] = xmean[j] + fac * (arx[i][j] - xmean[j]);
		}
		updateDistribution(functionValues);
	}

	/**
	 * update of the search distribution after samplePopulation().
	 * functionValues determines the selection order (ranking) for the solutions
	 * in the previously sampled population. This is just a different interface
	 * for updateDistribution(double[][], double[]).
	 * 
	 * @param functionValues
	 *            double[lambda], objective values of the previously sampled
	 *            population, one per sampled solution
	 * @see #samplePopulation()
	 * @see #updateDistribution(double[][], double[])
	 */
	public void updateDistribution(double[] functionValues) {
		if (state == 3) {
			error("updateDistribution() was already called");
		}
		if (functionValues.length != sp.getLambda())
			// fixed typo in message: "funcionValues" -> "functionValues"
			error("argument double[] functionValues.length="
					+ functionValues.length + "!=" + "lambda=" + sp.getLambda());

		/* pass input argument */
		for (int i = 0; i < sp.getLambda(); ++i) {
			fit.raw[i].setValue(functionValues[i]);
			fit.raw[i].setIndex(i);
		}

		counteval += sp.getLambda();
		// compute the minimum once; recentFunctionValue and
		// recentMinFunctionValue are by definition the same value
		double minValue = math.min(fit.raw).getValue();
		recentFunctionValue = minValue;
		recentMaxFunctionValue = math.max(fit.raw).getValue();
		recentMinFunctionValue = minValue;
		updateDistribution();
	}

	/**
	 * Bubblesort implementation to sort individuals in place. The ordering is
	 * defined by the parent optimization's fitness comparator: a compare(...)
	 * result of 2 marks a pair as out of order, triggering a swap.
	 * 
	 * @param ind
	 *            list of individuals, sorted in place.
	 */
	private void sortIndividuals(IntDouble[] ind) {
		boolean unsorted = true;
		IntDouble temp;
		while (unsorted) {
			unsorted = false;
			for (int i = 0; i < ind.length - 1; i++) {
				if (getParentOptimization().getFitness().compare(
						ind[i].getValue(), ind[i + 1].getValue()) == 2) {
					temp = ind[i];
					ind[i] = ind[i + 1];
					ind[i + 1] = temp;
					// BUGFIX: a swap requires another pass; previously the
					// flag was never reset, so the loop terminated after a
					// single pass and could leave the array unsorted
					unsorted = true;
				}
			}
		}

	}

	/*
	 * Core CMA-ES update: ranks the raw fitness values, recomputes the
	 * distribution mean, the evolution paths ps (step-size) and pc
	 * (covariance), the covariance matrix C and the step-size sigma.
	 * Guarded by state == 3 against being called twice per sampled
	 * population.
	 */
	private void updateDistribution() {

		int i, j, k, iNk, hsig;
		double sum;
		double psxps;

		if (state == 3) {
			error("updateDistribution() was already called");
		}
		// rank solutions by fitness; the whole update below relies on
		// fit.raw being sorted (best at index 0)
		sortIndividuals(fit.raw);
		/* sort function values */
		// Arrays.sort(fit.raw, fit.raw[0]);

		for (iNk = 0; iNk < sp.getLambda(); ++iNk) {
			fit.fitness[iNk].setValue(fit.raw[iNk].getValue()); // superfluous
																// at time
			fit.fitness[iNk].setIndex(fit.raw[iNk].getIndex());
		}

		/* update fitness history: shift right, newest best value at index 0 */
		for (i = fit.history.length - 1; i > 0; --i)
			fit.history[i] = fit.history[i - 1];
		fit.history[0] = fit.raw[0].getValue();

		/* save/update bestever-value */
		updateBestEver(arx[fit.raw[0].getIndex()], fit.raw[0].getValue(),
				counteval - sp.getLambda() + fit.raw[0].getIndex() + 1);

		/* re-calculate diagonal flag */
		flgdiag = (options.diagonalCovarianceMatrix == 1 || options.diagonalCovarianceMatrix >= countiter);
		if (options.diagonalCovarianceMatrix == -1) // options might have been
													// re-read
			flgdiag = (countiter <= 1 * 150 * N / sp.lambda); // CAVE:
																// duplication
																// of "default"

		/* calculate xmean and BDz~N(0,C) */
		// xmean becomes the weighted recombination of the mu best solutions;
		// BDz is the (mueff-scaled) mean shift in sigma-units
		for (i = 0; i < N; ++i) {
			xold[i] = xmean[i];
			xmean[i] = 0.;
			for (iNk = 0; iNk < sp.getMu(); ++iNk)
				xmean[i] += sp.getWeights()[iNk]
						* arx[fit.fitness[iNk].getIndex()][i];
			BDz[i] = Math.sqrt(sp.getMueff()) * (xmean[i] - xold[i]) / sigma;
		}

		/* cumulation for sigma (ps) using B*z */
		if (flgdiag) {
			/* given B=I we have B*z = z = D^-1 BDz */
			for (i = 0; i < N; ++i) {
				ps[i] = (1. - sp.getCs()) * ps[i]
						+ Math.sqrt(sp.getCs() * (2. - sp.getCs())) * BDz[i]
						/ diagD[i];
			}
		} else {
			/*
			 * calculate z := D^(-1) * B^(-1) * BDz into artmp, we could have
			 * stored z instead
			 */
			for (i = 0; i < N; ++i) {
				for (j = 0, sum = 0.; j < N; ++j)
					sum += B[j][i] * BDz[j];
				artmp[i] = sum / diagD[i];
			}
			/* cumulation for sigma (ps) using B*z */
			for (i = 0; i < N; ++i) {
				for (j = 0, sum = 0.; j < N; ++j)
					sum += B[i][j] * artmp[j];
				ps[i] = (1. - sp.getCs()) * ps[i]
						+ Math.sqrt(sp.getCs() * (2. - sp.getCs())) * sum;
			}
		}

		/* calculate norm(ps)^2 */
		psxps = 0;
		for (i = 0; i < N; ++i)
			psxps += ps[i] * ps[i];

		/* cumulation for covariance matrix (pc) using B*D*z~N(0,C) */
		// hsig stalls the pc update when the length of ps is unusually
		// large, which would otherwise inflate C after a step-size increase
		hsig = 0;
		if (Math.sqrt(psxps)
				/ Math.sqrt(1. - Math.pow(1. - sp.getCs(), 2. * countiter))
				/ sp.chiN < 1.4 + 2. / (N + 1.)) {
			hsig = 1;
		}
		for (i = 0; i < N; ++i) {
			pc[i] = (1. - sp.getCc()) * pc[i] + hsig
					* Math.sqrt(sp.getCc() * (2. - sp.getCc())) * BDz[i];
		}

		/* stop initial phase, not in use anymore as hsig does the job */
		if (iniphase
				&& countiter > Math.min(1 / sp.getCs(), 1 + N / sp.getMucov()))
			if (psxps / sp.getDamps()
					/ (1. - Math.pow((1. - sp.getCs()), countiter)) < N * 1.05)
				iniphase = false;

		/*
		 * this, it is harmful in a dynamic environment remove momentum in ps,
		 * if ps is large and fitness is getting worse
		 */
		// if (1 < 3 && psxps / N > 1.5 + 10 * Math.sqrt(2. / N)
		// && fit.history[0] > fit.history[1] && fit.history[0] >
		// fit.history[2]) {
		// double tfac;
		//
		// infoVerbose(countiter + ": remove momentum " + psxps / N + " "
		// + ps[0] + " " + sigma);
		//
		// tfac = Math.sqrt((1 + Math.max(0, Math.log(psxps / N))) * N / psxps);
		// for (i = 0; i < N; ++i)
		// ps[i] *= tfac;
		// psxps *= tfac * tfac;
		// }

		/* update of C: rank-one update from pc plus rank-mu update from
		 * the mu best steps; only the lower triangle (j <= i) is written,
		 * and only the diagonal when flgdiag is set */
		if (sp.getCcov() > 0 && iniphase == false) {

			++countCupdatesSinceEigenupdate;

			/* update covariance matrix */
			for (i = 0; i < N; ++i)
				for (j = (flgdiag ? i : 0); j <= i; ++j) {
					C[i][j] = (1 - sp.getCcov(flgdiag))
							* C[i][j]
							+ sp.getCcov()
							* (1. / sp.getMucov())
							* (pc[i] * pc[j] + (1 - hsig) * sp.getCc()
									* (2. - sp.getCc()) * C[i][j]);
					for (k = 0; k < sp.getMu(); ++k) { /*
														 * additional rank mu
														 * update
														 */
						C[i][j] += sp.getCcov() * (1 - 1. / sp.getMucov())
								* sp.getWeights()[k]
								* (arx[fit.fitness[k].getIndex()][i] - xold[i])
								* (arx[fit.fitness[k].getIndex()][j] - xold[j])
								/ sigma / sigma;
					}
				}
			maxsqrtdiagC = Math.sqrt(math.max(math.diag(C)));
			minsqrtdiagC = Math.sqrt(math.min(math.diag(C)));
		} // update of C

		/* update of sigma: CSA, increase when ps is longer than chiN */
		sigma *= Math.exp(((Math.sqrt(psxps) / sp.chiN) - 1) * sp.getCs()
				/ sp.getDamps());

		state = 3;

	} // updateDistribution()

	/**
	 * assigns lhs to a different instance with the same values, sort of smart
	 * clone: reuses lhs as destination buffer when it is a distinct array of
	 * matching length, otherwise allocates a fresh copy of rhs.
	 * 
	 * @param rhs
	 *            source array, must not be null
	 * @param lhs
	 *            candidate destination array, may be null
	 * @return array holding a copy of rhs (lhs itself if it was reusable)
	 */
	double[] assignNew(double[] rhs, double[] lhs) {
		assert rhs != null; // will produce an error anyway
		if (lhs != null && lhs != rhs && lhs.length == rhs.length)
			// bulk copy instead of the former element-wise loop
			System.arraycopy(rhs, 0, lhs, 0, lhs.length);
		else
			lhs = rhs.clone();
		return lhs;
	}

	/**
	 * records x as the best-ever solution if its fitness improves on the best
	 * seen so far (or if no best value exists yet, i.e. bestever_fit is NaN).
	 * 
	 * @param x
	 *            candidate solution
	 * @param fitness
	 *            objective value of x
	 * @param eval
	 *            evaluation count at which x was produced
	 */
	void updateBestEver(double[] x, double fitness, long eval) {
		boolean improved = Double.isNaN(bestever_fit) || fitness < bestever_fit;
		if (!improved)
			return;
		bestever_fit = fitness;
		bestever_eval = eval;
		// reuse the existing buffer when possible (see assignNew)
		bestever_x = assignNew(x, bestever_x);
	}

	/**
	 * ratio between length of longest and shortest axis of the distribution
	 * ellipsoid, which is the square root of the largest divided by the
	 * smallest eigenvalue of the covariance matrix
	 * 
	 * @return axis ratio of the distribution ellipsoid
	 */
	public double getAxisRatio() {
		return axisratio;
	}

	/**
	 * get best evaluated search point found so far. Remark that the
	 * distribution mean was not evaluated but is expected to have an even
	 * better function value.
	 * 
	 * @return copy of the best search point found so far as double[], or null
	 *         before initialization
	 * @see #getMeanX()
	 */
	public double[] getBestX() {
		// defensive copy; null until init() has run (state >= 0)
		return state < 0 ? null : bestever_x.clone();
	}

	/**
	 * objective function value of best solution found so far.
	 * 
	 * @return objective function value of best solution found so far, or NaN
	 *         before initialization
	 */
	public double getBestFunctionValue() {
		return state < 0 ? Double.NaN : bestever_fit;
	}

	/**
	 * evaluation count at which the best-ever solution was produced.
	 * 
	 * @return evaluation number of the best solution found so far
	 */
	public long getBestEvaluationNumber() {
		return bestever_eval;
	}

	/**
	 * best search point of the recent iteration, transformed into phenotype
	 * representation.
	 * 
	 * @return the best solution of the most recent population.
	 * @see #getBestRecentFunctionValue()
	 */
	public double[] getBestRecentX() {
		return genoPhenoTransformation(arx[fit.raw[0].getIndex()], null);
	}

	/**
	 * objective function value of the best solution in the recent iteration
	 * (population).
	 * 
	 * @return Returns the recentMinFunctionValue.
	 * @see #getBestEvaluationNumber()
	 * @see #getBestFunctionValue()
	 */
	public double getBestRecentFunctionValue() {
		return recentMinFunctionValue;
	}

	/**
	 * objective function value of the worst solution of the recent iteration.
	 * 
	 * @return Returns the recentMaxFunctionValue.
	 */
	public double getWorstRecentFunctionValue() {
		return recentMaxFunctionValue;
	}

	/**
	 * Get mean of the current search distribution. The mean should be regarded
	 * as the best estimator for the global optimum at the given iteration. In
	 * particular for noisy problems the distribution mean is the solution of
	 * choice preferable to the best or recent best. The returned array is a
	 * copy of the internal mean, so modifying it has no effect on the running
	 * optimization.
	 * 
	 * @return copy of the mean value of the current search distribution
	 * @see #getBestX()
	 * @see #getBestRecentX()
	 */
	public double[] getMeanX() {
		return xmean.clone();
	}

	/**
	 * search space dimensionality.
	 * 
	 * @return dimensionality N of the search space
	 */
	public int getDimension() {
		return N;
	}

	/**
	 * number of objective function evaluations counted so far
	 * 
	 * @return number of evaluations
	 */
	public long getCountEval() {
		return counteval;
	}

	/**
	 * number of iterations conducted so far
	 * 
	 * @return number of iterations
	 */
	public long getCountIter() {
		return countiter;
	}

	/**
	 * the final setting of initial <code>x</code> can be retrieved only after
	 * <code>init()</code> was called
	 * 
	 * @return copy of <code>double[] initialX</code>, the start point chosen
	 *         for distribution mean value <code>xmean</code>
	 */
	public double[] getInitialX() {
		if (state < 0)
			// fixed typo in message: "initiaX" -> "initialX"
			error("initialX not yet available, init() must be called first");
		return initialX.clone();
	}

	/**
	 * get properties previously read from a property file.
	 * 
	 * @return java.util.Properties key-value hash table
	 */
	public Properties getProperties() {
		return properties;
	}

	/**
	 * sets the number of objective function evaluations counted so far.
	 * 
	 * @param c
	 *            new evaluation count
	 * @return the evaluation count after the assignment
	 */
	public long setCountEval(long c) {
		counteval = c;
		return counteval;
	}

	/**
	 * search space dimension; must be set before the optimization is started
	 * and cannot be changed once locked or initialized.
	 * 
	 * @param n
	 *            number of variables of the search space
	 */
	public void setDimension(int n) {
		boolean locked = lockDimension > 0 || state >= 0;
		if (locked && N != n)
			error("dimension cannot be changed anymore or contradicts to initialX");
		N = n;
	}

	/**
	 * sets typicalX value, the same value in each coordinate
	 * 
	 * @param x
	 *            typical value for every coordinate
	 * @see #setTypicalX(double[])
	 */
	public void setTypicalX(double x) {
		if (state >= 0)
			error("typical x cannot be set anymore");
		typicalX = new double[] { x }; // allows "late binding" of dimension
	}

	/**
	 * sets typicalX value, which will be overwritten by initialX setting from
	 * properties or {@link #setInitialX(double[])} function call. Otherwise the
	 * initialX is sampled normally distributed from typicalX with
	 * initialStandardDeviations
	 * 
	 * @param x
	 *            typical search point, one value per coordinate; a length-1
	 *            array is treated as a scalar (late binding of the dimension)
	 * 
	 * @see #setTypicalX(double)
	 * @see #setInitialX(double[])
	 * @see #setInitialStandardDeviations(double[])
	 */
	public void setTypicalX(double[] x) {
		if (state >= 0)
			error("typical x cannot be set anymore");
		if (x.length == 1) { // to make properties work
			setTypicalX(x[0]);
			return;
		}
		if (N < 1)
			setDimension(x.length);
		if (N != x.length)
			// added missing space before "do not agree"
			error("dimensions N=" + N + " and input x.length=" + x.length
					+ " do not agree");
		// clone replaces the former element-wise copy loop; at this point
		// N == x.length holds, so the result is identical
		typicalX = x.clone();
		lockDimension = 1;
	}

	/**
	 * Sets the initial standard deviation, the same value for every coordinate
	 * (stored as a one-element array, allowing "late binding" of the
	 * dimension).
	 * 
	 * @param startsigma
	 *            initial standard deviation (step-size)
	 */
	public void setInitialStandardDeviation(double startsigma) {
		if (state >= 0)
			error("standard deviations cannot be set anymore");
		this.startsigma = new double[] { startsigma };
	}

	/**
	 * Sets the initial standard deviations, one per coordinate. A length-1
	 * array is delegated to {@link #setInitialStandardDeviation(double)} (to
	 * make properties work). Otherwise the dimension is fixed to the array
	 * length.
	 * 
	 * @param startsigma
	 *            initial standard deviations, copied internally
	 */
	public void setInitialStandardDeviations(double[] startsigma) {
		// assert startsigma != null; // assert should not be used for public
		// arg check
		if (state >= 0)
			error("standard deviations cannot be set anymore");
		if (startsigma.length == 1) { // to make properties work
			setInitialStandardDeviation(startsigma[0]);
			return;
		}
		if (N > 0 && N != startsigma.length)
			// added missing space before "do not agree"
			error("dimensions N=" + N + " and input startsigma.length="
					+ startsigma.length + " do not agree");
		if (N == 0)
			setDimension(startsigma.length);
		assert N == startsigma.length;
		this.startsigma = startsigma.clone();
		lockDimension = 1;
	}

	/**
	 * sets <code>initialX</code> to the same value in each coordinate
	 * 
	 * @param x
	 *            value
	 * @see #setInitialX(double[])
	 */
	public void setInitialX(double x) {
		if (state >= 0)
			error("initial x cannot be set anymore");
		xmean = new double[] { x }; // allows "late binding" of dimension N
	}

	/**
	 * set initial search point <code>xmean</code> coordinate-wise uniform
	 * between <code>l</code> and <code>u</code>; dimension needs to have been
	 * set before.
	 * 
	 * @param l
	 *            double lower value
	 * @param u
	 *            double upper value
	 * @see #setInitialX(double[])
	 * @see #setInitialX(double[], double[])
	 * */
	public void setInitialX(double l, double u) {
		if (state >= 0)
			error("initial x cannot be set anymore");
		if (N < 1)
			error("dimension must have been specified before");
		double span = u - l;
		xmean = new double[N];
		for (int coord = 0; coord < N; ++coord)
			xmean[coord] = l + span * rand.nextDouble();
		lockDimension = 1;
	}

	/**
	 * set initial search point <code>x</code> coordinate-wise uniform between
	 * <code>l</code> and <code>u</code>; the dimension is fixed to the length
	 * of the bound arrays.
	 * 
	 * @param l
	 *            double lower value per coordinate
	 * @param u
	 *            double upper value per coordinate
	 */
	public void setInitialX(double[] l, double[] u) {
		if (state >= 0)
			error("initial x cannot be set anymore");
		if (l.length != u.length)
			error("length of lower and upper values disagree");
		setDimension(l.length);
		xmean = new double[N];
		for (int coord = 0; coord < N; ++coord)
			xmean[coord] = l[coord] + (u[coord] - l[coord]) * rand.nextDouble();
		lockDimension = 1;
	}

	/**
	 * set initial search point to input value <code>x</code>.
	 * <code>x.length==1</code> is possible, otherwise the search space
	 * dimension is set to <code>x.length</code> irrevocably
	 * 
	 * @param x
	 *            double[] initial point, copied internally
	 * @see #setInitialX(double)
	 * @see #setInitialX(double, double)
	 */
	public void setInitialX(double[] x) {
		if (state >= 0)
			error("initial x cannot be set anymore");
		if (x.length == 1) { // to make properties work
			setInitialX(x[0]);
			return;
		}
		if (N > 0 && N != x.length)
			error("dimensions do not match");
		if (N == 0)
			setDimension(x.length);
		assert N == x.length;
		// clone replaces the former element-wise copy loop; the assertion
		// above guarantees the result is identical
		xmean = x.clone();
		lockDimension = 1; // because xmean is set up
	}

	/**
	 * sets the population size lambda, delegating to the strategy parameters.
	 * 
	 * @param lambda
	 *            population size
	 */
	public void setLambda(int lambda) {
		sp.setPopulationSize(lambda);
	}

	/**
	 * sets the lower bounds of the search space.
	 * 
	 * @param b
	 *            lower bound per coordinate; NOTE(review): stored by
	 *            reference, not copied — later modification of the passed
	 *            array affects the strategy
	 */
	public void setLBound(double[] b) {
		LBound = b;
	}

	/**
	 * sets the upper bounds of the search space.
	 * 
	 * @param b
	 *            upper bound per coordinate; NOTE(review): stored by
	 *            reference, not copied — later modification of the passed
	 *            array affects the strategy
	 */
	public void setUBound(double[] b) {
		UBound = b;
	}

	/**
	 * sets the random number generator used for sampling.
	 * 
	 * @param rng
	 *            new Random number generator
	 */
	public void setRNG(RandomNumberGenerator rng) {
		this.rand = rng;
	}

	/**
	 * @return the current strategy parameters (not a copy)
	 */
	public CMAParameters getParameters() {
		return sp;
	}

	/**
	 * @return the parent optimization this strategy is embedded in
	 */
	public NaturalOptimization getParentOptimization() {
		return parentOptimization;
	}

	/**
	 * sets the parent optimization this strategy is embedded in.
	 * 
	 * @param parentOptimization
	 *            enclosing optimization, provides fitness comparison and
	 *            properties
	 */
	public void setParentOptimization(NaturalOptimization parentOptimization) {
		this.parentOptimization = parentOptimization;
	}

	/**
	 * very provisional error handling. Methods of the class
	 * CMAEvolutionStrategy might throw the CMAException, that need not be
	 * caught, because it extends the "unchecked" RuntimeException class
	 */
	public class CMAException extends RuntimeException {
		private static final long serialVersionUID = 1L;

		CMAException(String s) {
			super(s);
		}
	}

}
