/* bredeche(at)lri.fr
 * Created on 14 Dec. 2006
 */

package piconode.factory;

import java.util.ArrayList;
import java.util.Random;

import piconode.core.arc.LearnableArc;
import piconode.core.node.EchoStateNetwork;
import piconode.core.node.RecurrentNeuron;
import piconode.ext.ActivationFunction_HyperbolicTangent;
import piconode.ext.ActivationFunction_Linear;
import piconode.factory.objectivefunctions.ObjectiveFunction;
import piconode.toolbox.Display;
import piconode.toolbox.Tools;
import piconode.tutorials.Sin7ObjectiveFunction;
import piconode.visualpiconode.Visualizer;

/**
 * 
 * ------------------
 * 
 * !!! Echo State Network - piconode implementation !!! implementation:
 * nicolas.bredeche(at)lri.fr, 20070615 !!! version 20070628.
 * 
 * Validated according to experiment on [jaeger01], section 3.1 :
 * 
 * *Architecture** - reservoir size: 100 - cnx density: 5% - lambda: 0.88 -
 * noise value: 0.01 - all other weights initialized btw -1 and +1 - tanh
 * function for all reservoir and output neurons - one input node, one output
 * node
 * 
 * *I/O** - input signal: sin(n/5) - output teacher signal: 0.5 * ( ( sin( ( 2 *
 * PI * n ) / ( 10 * PI ) ) ^ 7 );
 * 
 * *Experimental setup** - scenario - 000..099: washing out internal dynamics -
 * 100..299: learning with teacher forcing - 300..399: evaluation with teacher
 * forcing [optional] - 400..499: evaluation without teacher forcing - 500..599:
 * perturbing ESN by artificially adding noise in input nodes for 100 steps
 * [optional] - 600..699: evaluation of recovery from perturbation [optional] -
 * test on 11 experiments, evaluation without teacher forcing (steps 400..499) -
 * results: (step 499, i.e. end of evaluation without teacher forcing, accuracy
 * based on 100 output values) - average (accuracy +/- stddev) : 2.45E-7 +/-
 * 1.12E-7 - raw figures: 1.7947572257849948E-7 1.4980238232567466E-7
 * 1.0619531166560923E-7 3.6242738876530094E-7 4.4945476868764033E-7
 * 2.2033390420198677E-7 2.050765929624872E-7 1.2527115345365452E-7
 * 3.9176743202847796E-7 3.000697982236981E-7 2.0667575767836233E-7 - these are
 * *average* figures so they might over-estimate errors in some cases (i.e. take
 * also into account error from initial steps which may be non representative,
 * especially when estimating error during recovery phase where initial steps
 * are still perturbed)
 * 
 * *Bibliography:** - [jaeger01] H. Jaeger. the "echo state" approach to
 * analysing and training recurrent neural network, published as GMD techreport
 * 148, 2001. Corrected on-line version. - [jaeger02] H. Jaeger. Tutorial on
 * training recurrent neural networks, covering BPTT, RTRL, EKF and the "echo
 * state network" approach. published as GMD Techreport 159, 2002. see also -
 * http://www.faculty.iu-bremen.de/hjaeger/esn_research.html (Jaeger's web page) -
 * http://www.esn-lsm.tugraz.at/index.php/Main_Page (NIPS 2006 workshop on ESN
 * and LSM)
 * 
 * *TODO - under development** - allow direct connections from inputs to outputs -
 * allow recurrent connections from outputs to outputs - test numerous output
 * architecture
 * 
 * ------------------
 */
public class EchoStateNetworkFactory {

	/**
	 * Builds an Echo State Network seeded from a freshly drawn random seed.
	 * Warning, bias is user-defined, i.e. the user should create an additional
	 * input neuron for implementing the bias input (firing always 1.0).
	 * 
	 * @param __nbIn
	 *            desired number of inputs + 1 (bias neuron, to be manually set
	 *            to 1.0)
	 * @param __reservoirSize
	 *            number of hidden units
	 * @param __nbOut
	 *            number of output units
	 * @param __connectionDensity
	 *            (btw 0 and 1). gives probability to have 1 or -1 rather than 0
	 *            value
	 * @param __spectralRadiusNormalizationValue
	 *            must be <1 to ensure contraction (e.g. 0.9).
	 * @param __inputToReservoirConnections
	 *            consider or ignore inputs to reservoir connections? (any
	 *            neural network in piconode needs at least 1 input node; as an
	 *            ESN may not need any input, the user should define a single
	 *            input unit and set this parameter to false, in which case the
	 *            input arc weights will be zero)
	 * @param __inputToOutputDirectConnections
	 *            allow direct connections from inputs to outputs?
	 * @param __outputSelfRecurrentConnections
	 *            allow recurrent connections for each output to itself?
	 * @param __backwardConnections
	 *            allow backward connections from outputs to reservoir?
	 * @param __inputWeightRange
	 *            +/- range of weight values from inputs to reservoir (e.g. 1.0
	 *            means weights are btw -1 and 1) [fixed weights]
	 * @param __reservoirToOutputWeightRange
	 *            +/- range of weight values from reservoir to outputs (e.g. 1.0
	 *            means weights are btw -1 and 1) [learning weights]
	 * @param __outputToReservoirWeightRange
	 *            +/- range of weight values from outputs back to reservoir (if
	 *            backward cnx allowed, otw any value is ok - note that this is
	 *            mandatory for teacher forcing) [fixed weights]
	 * @param __outputToOutputWeightRange
	 *            +/- range of weight values from each output to all other
	 *            outputs (if recurrent output cnx allowed) [learning weights]
	 * @param __noiseValue
	 *            added noise to output signal back to reservoir, noise range
	 *            btw -noiseValue and +noiseValue (e.g. 0.01)
	 * @param __outputTanhActivationFunction
	 *            output Activation Function (true: tanh, false: linear) --
	 *            (note: important issue for learning)
	 * @param __verbose
	 *            if true, display information regarding reservoir
	 *            initialization
	 * @return the created Echo State Network
	 */
	public static EchoStateNetwork createESN(int __nbIn, int __reservoirSize, int __nbOut, double __connectionDensity, double __spectralRadiusNormalizationValue, boolean __inputToReservoirConnections, boolean __inputToOutputDirectConnections, boolean __outputSelfRecurrentConnections, boolean __backwardConnections, double __inputWeightRange, double __reservoirToOutputWeightRange, double __outputToReservoirWeightRange, double __outputToOutputWeightRange, double __noiseValue,
			boolean __outputTanhActivationFunction, boolean __verbose) {
		// Draw a random seed and delegate to the seeded factory method (keeping
		// the seed explicit there makes an ESN reproducible).
		long randomSeed = new Random().nextLong();
		return createESN(__nbIn, __reservoirSize, __nbOut, __connectionDensity, __spectralRadiusNormalizationValue, __inputToReservoirConnections, __inputToOutputDirectConnections, __outputSelfRecurrentConnections, __backwardConnections, __inputWeightRange, __reservoirToOutputWeightRange, __outputToReservoirWeightRange, __outputToOutputWeightRange, __noiseValue, __outputTanhActivationFunction, __verbose, randomSeed);
	}

	/**
	 * Same as the previous factory method, but takes an explicit seed value for
	 * the random generator (useful to rebuild an identical ESN).
	 * 
	 * @param __nbIn
	 *            desired number of inputs + 1 (bias neuron, to be manually set
	 *            to 1.0)
	 * @param __reservoirSize
	 *            number of hidden units (must be strictly positive)
	 * @param __nbOut
	 *            number of output units
	 * @param __connectionDensity
	 *            (btw 0 and 1) probability to have 1 or -1 rather than 0 value
	 * @param __spectralRadiusNormalizationValue
	 *            must be <1 to ensure contraction (e.g. 0.9)
	 * @param __inputToReservoirConnections
	 *            consider or ignore inputs to reservoir connections (if false,
	 *            input arcs are still created, but with zero weight)
	 * @param __inputToOutputDirectConnections
	 *            allow direct connections from inputs to outputs?
	 * @param __outputSelfRecurrentConnections
	 *            allow recurrent connections for each output to itself?
	 * @param __backwardConnections
	 *            allow backward connections from outputs to reservoir?
	 * @param __inputWeightRange
	 *            +/- range of weights from inputs to reservoir [fixed weights]
	 * @param __reservoirToOutputWeightRange
	 *            +/- range of weights from reservoir to outputs [learning
	 *            weights]
	 * @param __outputToReservoirWeightRange
	 *            +/- range of weights from outputs back to reservoir [fixed
	 *            weights]
	 * @param __outputToOutputWeightRange
	 *            +/- range of weights from each output to all outputs [learning
	 *            weights]
	 * @param __noiseValue
	 *            noise added to output signal fed back to reservoir, drawn btw
	 *            -noiseValue and +noiseValue (e.g. 0.01)
	 * @param __outputTanhActivationFunction
	 *            output activation function (true: tanh, false: linear)
	 * @param __verbose
	 *            if true, display information regarding reservoir
	 *            initialization
	 * @param __seed
	 *            seed value for the random generator
	 * @return the fully connected and initialized Echo State Network
	 */
	public static EchoStateNetwork createESN(int __nbIn, int __reservoirSize, int __nbOut, double __connectionDensity, double __spectralRadiusNormalizationValue, boolean __inputToReservoirConnections, boolean __inputToOutputDirectConnections, boolean __outputSelfRecurrentConnections, boolean __backwardConnections, double __inputWeightRange, double __reservoirToOutputWeightRange, double __outputToReservoirWeightRange, double __outputToOutputWeightRange, double __noiseValue,
			boolean __outputTanhActivationFunction, boolean __verbose, long __seed) {
		Random rand = new Random(__seed);

		// fix: also reject negative sizes (original test was "== 0"); report on
		// stderr for consistency with the spectral radius check below
		if (__reservoirSize <= 0) {
			System.err.println("[ERROR] EchoStateNetworkFactory.createESN(...) - cannot create a network with zero hidden node.");
			System.exit(-1);
		}

		EchoStateNetwork network = new EchoStateNetwork();

		network.setNoiseValue(__noiseValue);
		network.setNbIn(__nbIn);
		network.setNbOut(__nbOut);
		network.setReservoirSize(__reservoirSize);
		network.setOutputActivationFunction(__outputTanhActivationFunction);
		network.setInputOutputDirectConnections(__inputToOutputDirectConnections);
		network.setOutputSelfRecurrentConnections(__outputSelfRecurrentConnections);

		// ** create input/output neurons (inputs are linear; outputs are tanh
		// or linear depending on the requested activation function)

		ArrayList<RecurrentNeuron> inputs = new ArrayList<RecurrentNeuron>();
		for (int i = 0; i != __nbIn; i++) {
			RecurrentNeuron node = new RecurrentNeuron(network, new ActivationFunction_Linear(), "in(" + i + ")");
			inputs.add(node);
			network.registerInputNeuron(node);
		}

		ArrayList<RecurrentNeuron> outputs = new ArrayList<RecurrentNeuron>();
		for (int i = 0; i != __nbOut; i++) {
			RecurrentNeuron node;
			if (__outputTanhActivationFunction)
				node = new RecurrentNeuron(network, new ActivationFunction_HyperbolicTangent(), "out(" + i + ")");
			else
				node = new RecurrentNeuron(network, new ActivationFunction_Linear(), "out(" + i + ")");
			outputs.add(node);
			network.registerOutputNeuron(node);
		}

		ArrayList<RecurrentNeuron> reservoir = new ArrayList<RecurrentNeuron>();
		for (int i = 0; i != __reservoirSize; i++) {
			RecurrentNeuron node = new RecurrentNeuron(network, new ActivationFunction_HyperbolicTangent(), "hidden(" + i + ")");
			reservoir.add(node);
		}

		// ** connect neurons (except reservoir internal dynamics)

		// inputs -> outputs (optional direct connections)
		if (__inputToOutputDirectConnections == true)
			for (int i = 0; i != __nbIn; i++)
				for (int j = 0; j != __nbOut; j++) {
					network.registerArc(new LearnableArc(inputs.get(i), outputs.get(j), rand.nextDouble() * 2d * __inputWeightRange - __inputWeightRange));
				}

		// inputs -> reservoir (arcs are always created; weight is zero when
		// input connections are ignored, since piconode needs >= 1 input node)
		for (int i = 0; i != __nbIn; i++)
			for (int j = 0; j != __reservoirSize; j++) {
				if (__inputToReservoirConnections == true)
					network.registerArc(new LearnableArc(inputs.get(i), reservoir.get(j), rand.nextDouble() * 2d * __inputWeightRange - __inputWeightRange));
				else
					network.registerArc(new LearnableArc(inputs.get(i), reservoir.get(j), 0));
			}

		// reservoir -> outputs ; outputs -> reservoir (optional feedback)
		for (int i = 0; i != __reservoirSize; i++) {
			for (int j = 0; j != __nbOut; j++) {
				network.registerArc(new LearnableArc(reservoir.get(i), outputs.get(j), rand.nextDouble() * 2d * __reservoirToOutputWeightRange - __reservoirToOutputWeightRange));
				if (__backwardConnections == true)
					network.registerArc(new LearnableArc(outputs.get(j), reservoir.get(i), rand.nextDouble() * 2d * __outputToReservoirWeightRange - __outputToReservoirWeightRange));
			}
		}

		// outputs -> outputs (optional recurrent connections, incl. self)
		if (__outputSelfRecurrentConnections)
			for (int i = 0; i != __nbOut; i++)
				for (int j = 0; j != __nbOut; j++)
					network.registerArc(new LearnableArc(outputs.get(i), outputs.get(j), rand.nextDouble() * 2d * __outputToOutputWeightRange - __outputToOutputWeightRange));

		// ** connect reservoir internal dynamics

		// * build connectivity matrix: with probability = density the weight is
		// +1 or -1 (each sign equiprobable), otherwise zero
		double[][] reservoirCnxMatrix = new double[__reservoirSize][__reservoirSize];
		for (int i = 0; i != __reservoirSize; i++)
			for (int j = 0; j != __reservoirSize; j++)
				if (rand.nextDouble() < __connectionDensity)
					if (rand.nextDouble() < 0.5)
						reservoirCnxMatrix[i][j] = 1;
					else
						reservoirCnxMatrix[i][j] = -1;
				else
					reservoirCnxMatrix[i][j] = 0;

		// debug : display reservoir connectivity (note: printed transposed,
		// [j][i] indexing)
		if (__verbose) {
			if (__reservoirSize < 21) {
				System.out.println("### Initializing Reservoir ###\n");
				System.out.println("initial reservoir adjacent matrix : ");
				for (int i = 0; i != __reservoirSize; i++) {
					for (int j = 0; j != __reservoirSize; j++)
						System.out.print("[" + reservoirCnxMatrix[j][i] + "] ");
					System.out.println("");
				}
			} else
				System.out.println("initial reservoir is not displayed (size too big).");
		}

		// * normalize matrix wrt. desired spectral radius

		Matrix M = new Matrix(reservoirCnxMatrix);
		EigenvalueDecomposition E = new EigenvalueDecomposition(M);
		double[] spectrum = E.getRealEigenvalues();
		double[] imaginarySpectrum = E.getImagEigenvalues();
		// modulus of each (possibly complex) eigenvalue
		for (int i = 0; i < spectrum.length; i++) {
			spectrum[i] = Math.sqrt(spectrum[i] * spectrum[i] + imaginarySpectrum[i] * imaginarySpectrum[i]);
		}

		// spectral radius = largest eigenvalue modulus (all moduli >= 0)
		double spectralRadius = spectrum[0];
		for (int i = 1; i != __reservoirSize; i++)
			if (spectralRadius < spectrum[i])
				spectralRadius = spectrum[i];

		if (spectralRadius == 0) {
			System.err.println("[ERROR] Spectral radius for ESN is *zero* -- setting higher density may help.");
			System.exit(-1);
		}

		// debug
		if (__verbose) {
			System.out.print("\nmatrix spectrum : ");
			for (int i = 0; i != __reservoirSize; i++)
				System.out.print("[" + spectrum[i] + "] ");
			System.out.println("");
			System.out.println("spectral radius : " + spectralRadius);
			System.out.println("");
		}

		// scale and damp weights so the new spectral radius equals
		// __spectralRadiusNormalizationValue (contraction if < 1)
		for (int i = 0; i != __reservoirSize; i++)
			for (int j = 0; j != __reservoirSize; j++)
				reservoirCnxMatrix[i][j] = reservoirCnxMatrix[i][j] / spectralRadius * __spectralRadiusNormalizationValue;

		// debug : display reservoir connectivity after damping (transposed)
		if (__verbose) {
			System.out.println("reservoir after normalizing and damping : ");
			for (int i = 0; i != __reservoirSize; i++) {
				for (int j = 0; j != __reservoirSize; j++)
					System.out.print("[" + reservoirCnxMatrix[j][i] + "] ");
				System.out.println("");
			}
			System.out.println("");
		}

		// * connecting reservoir (only non-zero weights become arcs)

		for (int i = 0; i != __reservoirSize; i++)
			for (int j = 0; j != __reservoirSize; j++) {
				if (reservoirCnxMatrix[i][j] != 0)
					network.registerArc(new LearnableArc(reservoir.get(i), reservoir.get(j), reservoirCnxMatrix[i][j]));
			}

		// ** network initialization

		network.initNetwork();

		if (__verbose)
			System.out.println("ESN created.\n");

		return network;
	}

	// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

	/**
	 * Learning step: (a) runs the network for __iterations steps with teacher
	 * forcing while sampling the states of all nodes projecting onto the
	 * outputs, then (b) computes the reservoir-to-output weights by linear
	 * regression (pseudo-inverse of the sampled state matrix times the oracle
	 * matrix, cf. [jaeger01/02]) and writes them back into the network arcs.
	 * 
	 * NOTE(review): the oracle matrix has a single column and teacherOutput[0]
	 * is used when storing output node states, so learning appears to support
	 * a single output node only -- confirm before using with __nbOut > 1.
	 * 
	 * @param __network
	 *            the ESN to train (its output arc weights are modified)
	 * @param __fn
	 *            objective function providing input, teacher and oracle values
	 * @param __startIt
	 *            first iteration index
	 * @param __iterations
	 *            number of sampling iterations
	 * @param __verbose
	 *            if true, trace oracle/output values and matrix dimensions
	 * @return average squared error of output 0 over the sampling phase
	 */
	public static double sampleAndLearn(EchoStateNetwork __network, ObjectiveFunction __fn, int __startIt, int __iterations, boolean __verbose) {
		// fix: raw ArrayList replaced with a parameterized one
		ArrayList<Double> inputValuesList = new ArrayList<Double>();
		for (int i = 0; i != __fn.getInputValue(0).length; i++)
			inputValuesList.add(0d);

		int nbIn = __network.getNbIn();
		int nbOut = __network.getNbOut();
		int reservoirSize = __network.getReservoirSize();
		boolean outputTanhFn = __network.getOutputActivationFunction();
		boolean inputOutputDirectConnections = __network.getInputOutputDirectConnections();
		boolean outputSelfRecurrentConnections = __network.getOutputSelfRecurrentConnections();

		// number of nodes projecting onto output nodes (possibly: input,
		// reservoir and output nodes, depending on the network topology)
		int size = reservoirSize;
		if (inputOutputDirectConnections)
			size += nbIn;
		if (outputSelfRecurrentConnections)
			size += nbOut;

		double[][] oracle = new double[__iterations][1];
		double[][] outputAfferentNodesState = new double[__iterations][size];

		double averageError;

		// * learning = sampling + weight computation

		// step 2a : sampling

		if (__verbose) {
			System.out.println("\n### Learning ###");
			System.out.println("#it,oracle,output,average_error");
		}

		// index of the first node projecting onto output nodes: inputs are
		// included only when direct input->output connections exist
		int outputAfferentNodeStartIndex;
		if (inputOutputDirectConnections)
			outputAfferentNodeStartIndex = 0;
		else
			outputAfferentNodeStartIndex = nbIn;

		// index just past the last afferent node: output nodes are included
		// only when output self-recurrent connections exist (outputs are dealt
		// with before storing input and internal node states)
		int outputAfferentNodeStopIndex;
		if (outputSelfRecurrentConnections)
			outputAfferentNodeStopIndex = nbIn + reservoirSize + nbOut;
		else
			outputAfferentNodeStopIndex = nbIn + reservoirSize;

		averageError = 0;

		for (int it = __startIt; it != __startIt + __iterations; it++) {
			double teacherOutput[] = __fn.getOutputValue(it);
			double oracleOutput[] = __fn.getOutputValue(it + 1);

			// the regression targets the pre-activation value, hence tanh^-1
			// when the output activation function is tanh
			if (outputTanhFn == true)
				oracle[it - __startIt][0] = Tools.argtanh(oracleOutput[0]);
			else
				oracle[it - __startIt][0] = oracleOutput[0];

			inputValuesList.clear();
			double[] inputs = __fn.getInputValue(it);
			for (int i = 0; i != inputs.length; i++)
				inputValuesList.add(inputs[i]);

			// teacher forcing: clamp the output nodes to the teacher signal
			for (int i = 0; i != __network.getOutputNeuronListSize(); i++)
				__network.getOutputNeuronAt(i).setInputValue(teacherOutput[i]);

			__network.step(inputValuesList);

			if (outputSelfRecurrentConnections) {
				// also save output node states -- the forced teacher value is
				// stored, not the network's own output
				for (int i = outputAfferentNodeStartIndex; i != outputAfferentNodeStopIndex - nbOut; i++) {
					outputAfferentNodesState[it - __startIt][i - outputAfferentNodeStartIndex] = ((RecurrentNeuron) (__network.getNodeAt(i))).getValue();
				}
				for (int i = outputAfferentNodeStopIndex - nbOut; i != outputAfferentNodeStopIndex; i++) {
					outputAfferentNodesState[it - __startIt][i - outputAfferentNodeStartIndex] = teacherOutput[0];
				}
			} else {
				// no output node states considered
				for (int i = outputAfferentNodeStartIndex; i != outputAfferentNodeStopIndex; i++) {
					outputAfferentNodesState[it - __startIt][i - outputAfferentNodeStartIndex] = ((RecurrentNeuron) (__network.getNodeAt(i))).getValue();
				}
			}

			double error = Math.pow((__network.getOutputNeuronAt(0).getValue() - oracleOutput[0]), 2);
			averageError = averageError + error;

			if (__verbose) {
				for (int i = 0; i != __network.getOutputNeuronListSize(); i++) {
					System.out.print(it + "\t" + oracleOutput[i] + "\t");
					System.out.print(__network.getOutputNeuronAt(i).getValue() + "\t");
				}
				System.out.println(averageError / ((it - __startIt + 1)));
			}

		}

		averageError = averageError / ((__iterations));

		// step 2b : weight computation

		Matrix M = new Matrix(outputAfferentNodesState);
		Matrix T = new Matrix(oracle);

		if (__verbose) {
			System.out.println("");
			System.out.println("#M matrix is (" + M.getRowDimension() + "," + M.getColumnDimension() + ").");
			System.out.println("#T matrix is (" + T.getRowDimension() + "," + T.getColumnDimension() + ").");
		}

		// simple linear regression : transpose(W_out) =
		// pseudoInverse(internalState) * outputs
		// (JAMA's inverse() computes a least-squares solution when M is not
		// square, i.e. a pseudo-inverse)
		Matrix Wout = (M.inverse().times(T)).transpose();

		// Wout is an L-lines x (K+N+L)-columns matrix; line "i" contains all
		// weights towards output node "i" (transpose is optional -- if
		// omitted, parse by column instead of by line)

		if (__verbose)
			System.out.println("\n#Resulting Wout matrix size is (" + Wout.getRowDimension() + "," + Wout.getColumnDimension() + ").");

		// update weights -- from afferent nodes to output nodes only
		if (__verbose)
			System.out.println("\n#update weights from reservoir to output(s).");

		// NOTE(review): outgoing arc index 0 is assumed to be the arc towards
		// the output node -- depends on arc registration order in createESN
		for (int i = outputAfferentNodeStartIndex; i != outputAfferentNodeStopIndex; i++) {
			double oldWeight = ((LearnableArc) (((RecurrentNeuron) (__network.getNodeAt(i))).getOutgoingArcAt(0))).getWeightValue();
			if (__verbose)
				System.out.println("# " + ((RecurrentNeuron) (__network.getNodeAt(i))).getName() + ".weight[0] : " + oldWeight + " -> " + Wout.get(0, i - outputAfferentNodeStartIndex));
			((LearnableArc) (((RecurrentNeuron) (__network.getNodeAt(i))).getOutgoingArcAt(0))).setWeightValue(Wout.get(0, i - outputAfferentNodeStartIndex));
		}

		return averageError;
	}

	// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

	/**
	 * Runs the network for __iterations steps and returns the average squared
	 * error of output 0 against the oracle signal.
	 * 
	 * @param __network
	 *            the ESN to run
	 * @param __fn
	 *            objective function providing input, teacher and oracle values
	 * @param __startIt
	 *            first iteration index
	 * @param __iterations
	 *            number of steps
	 * @param __isInput
	 *            feed input values from the objective function? (if false, the
	 *            initial zero-valued input list is reused every step)
	 * @param __isTeacherForcing
	 *            clamp output nodes to the teacher signal before each step?
	 * @param __verbose
	 *            if true, trace oracle/output values and running average error
	 * @return average squared error over the run
	 */
	public static double run(EchoStateNetwork __network, ObjectiveFunction __fn, int __startIt, int __iterations, boolean __isInput, boolean __isTeacherForcing, boolean __verbose) {
		double averageError = 0;
		// fix: raw ArrayList replaced with a parameterized one
		ArrayList<Double> inputValuesList = new ArrayList<Double>();
		for (int i = 0; i != __fn.getInputValue(0).length; i++)
			inputValuesList.add(0d);

		if (__verbose) {
			System.out.println("\n### Running : [ input : " + __isInput + " ] [ teacher forcing : " + __isTeacherForcing + " ] ###");
			System.out.println("#it,{oracle,output}*nbout,average_error");
		}

		for (int it = __startIt; it != __startIt + __iterations; it++) {
			if (__isTeacherForcing) {
				double teacherOutput[] = __fn.getOutputValue(it);
				for (int i = 0; i != __network.getOutputNeuronListSize(); i++)
					__network.getOutputNeuronAt(i).setInputValue(teacherOutput[i]);
			}

			if (__isInput) {
				inputValuesList.clear();
				double[] inputs = __fn.getInputValue(it);
				for (int i = 0; i != inputs.length; i++)
					inputValuesList.add(inputs[i]);
			}

			__network.step(inputValuesList);

			// error is measured against the NEXT oracle value (one-step-ahead
			// prediction)
			double oracleOutput[] = __fn.getOutputValue(it + 1);
			double error = Math.pow((__network.getOutputNeuronAt(0).getValue() - oracleOutput[0]), 2);
			averageError = averageError + error;

			if (__verbose) {
				for (int i = 0; i != __network.getOutputNeuronListSize(); i++) {
					System.out.print(it + "\t" + oracleOutput[i] + "\t");
					System.out.print(__network.getOutputNeuronAt(i).getValue() + "\t");
				}
				System.out.println(averageError / ((it - __startIt + 1))); // running average error
			}

		}
		averageError = averageError / ((__iterations));
		return averageError;
	}

	// ---------------------------------------------------------------------------------------------------------------------------

	// testing / tutorial entry point: builds an ESN, trains it on the sin^7
	// objective function, then demonstrates recovery from two kinds of
	// perturbation (input noise, input phase offset).
	public static void main(String[] args) {

		Display.info("### ECHO STATE NETWORK : initialization and learning tutorial ###\n20070613, niko.\n\n");

		double startTime = System.currentTimeMillis();

		// * build and display ESN

		Display.warning("Note: an ESN does not contain a \"bias\" node by default. You may want to create an additional input which fires always 1.0 depending on your experimental setup.");

		int nbIn = 1; // 1
		int reservoirSize = 100; // [1:100]
		int nbOut = 1; // 1

		boolean outputTanhFn = true; // true: tanh, false: linear

		boolean inputOutputDirectConnections = false;
		boolean outputSelfRecurrentConnections = false;

		// parameters follow [jaeger01] section 3.1 (see class comment)
		EchoStateNetwork network = createESN(nbIn, reservoirSize, nbOut, //
				0.05, // connection density [1:0.05]
				0.88, // dampening (1.0: spectral radius) [1:0.88]
				true, // input to reservoir connections. ignore=false, but
				// piconode needs at least one (possibly unused) input unit.
				inputOutputDirectConnections, // connections btw inputs and outputs?
				outputSelfRecurrentConnections, // self connections btw outputs?
				true, // connections btw outputs and reservoir?
				0.5, // input->reservoir weight range [fixed]
				0.5, // reservoir->output initial weight range [learning]
				0.5, // output->reservoir weight range [fixed]
				0.5, // output->output self cnx [fixed]
				0.01, // noise value added to output fed back to reservoir [1:0.01]
				outputTanhFn, // output activation function (true: tanh, false: linear)
				true // display info ("verbose" mode).
		);

		System.out.println("\n");
		network.displayInformation();

		ObjectiveFunction fn = new Sin7ObjectiveFunction();

		// step 1 : standard experimental setup : (1) washing out internal
		// dynamics (2) sampling and learning (3) testing with teacher forcing
		// (4) real-world condition testing

		System.out.println("#initial washout");
		// initial washout - no input, no teacher forcing
		EchoStateNetworkFactory.run(network, fn, 0, 100, false, false, true);
		System.out.println("#sampling and learning");
		EchoStateNetworkFactory.sampleAndLearn(network, fn, 100, 200, true);
		System.out.println("#running (1)");
		// 100 it. - with teacher forcing, verbose mode
		EchoStateNetworkFactory.run(network, fn, 300, 100, true, true, true);
		System.out.println("#running (2)");
		// 100 it. - without teacher forcing, verbose mode
		EchoStateNetworkFactory.run(network, fn, 400, 100, true, false, true);

		// step 2 [optional/demo] : (1) adding perturbation for some steps (2)
		// showing recovery without perturbations
		// demo purpose: code shows how to iterate the network - this is broadly
		// similar to the run method

		double averageError = 0;
		// fix: raw ArrayList replaced with a parameterized one
		ArrayList<Double> inputValuesList = new ArrayList<Double>();

		System.out.println("\n### Running : [ input : true ] [ teacher forcing : false ] [ adding *strong* perturbation to inputs] ###");
		System.out.println("#it,oracle,output");

		for (int it = 500; it != 600; it++) {
			// feed the true input plus strong uniform noise in [-1,+1]
			double[] inputs = fn.getInputValue(it);
			for (int i = 0; i != inputs.length; i++)
				inputValuesList.add(inputs[i] + Tools.randomNoise(1d));

			network.step(inputValuesList);

			double oracleOutput[] = fn.getOutputValue(it + 1);
			double error = Math.pow((network.getOutputNeuronAt(0).getValue() - oracleOutput[0]), 2);
			averageError = averageError + error;

			for (int i = 0; i != network.getOutputNeuronListSize(); i++) {
				System.out.print(it + "\t" + oracleOutput[i] + "\t");
				System.out.print(network.getOutputNeuronAt(i).getValue() + "\t");
			}
			System.out.println(averageError / ((it - 500 + 1)));

			inputValuesList.clear();
		}

		// show how the network recovers from the previous perturbations
		EchoStateNetworkFactory.run(network, fn, 600, 200, true, false, true);

		// step 3 [optional/demo] : (1) offset perturbation on the input
		// sequence (i.e. miss N steps) (2) showing recovery towards new phased
		// input sequence

		averageError = 0;
		inputValuesList = new ArrayList<Double>();

		int perturbationOffset = 16; // (int)(Math.random() * 25.); // phase is ~33 steps.

		System.out.println("\n### Running : [ input : true ] [ teacher forcing : false ] [ recovery from new input sequence starting point ] ###");
		System.out.println("#it,oracle,output,average_error");

		for (int it = 800; it != 1000; it++) {
			// feed the input sequence shifted by perturbationOffset steps
			double[] inputs = fn.getInputValue(it + perturbationOffset);
			for (int i = 0; i != inputs.length; i++)
				inputValuesList.add(inputs[i]);

			network.step(inputValuesList);

			double oracleOutput[] = fn.getOutputValue(it + perturbationOffset + 1);
			double error = Math.pow((network.getOutputNeuronAt(0).getValue() - oracleOutput[0]), 2);
			averageError = averageError + error;

			for (int i = 0; i != network.getOutputNeuronListSize(); i++) {
				System.out.print(it + "\t" + oracleOutput[i] + "\t");
				System.out.print(network.getOutputNeuronAt(i).getValue() + "\t");
			}
			System.out.println(averageError / ((it - 800 + 1)));

			inputValuesList.clear();
		}

		// * display network graphical representation
		// int returnValue = Visualizer.showNetwork(network);
		System.out.println("\n# Terminated (" + ((System.currentTimeMillis() - startTime) / 1000) + "s elapsed).");

	}

}
