/* bredeche(at)lri.fr
 * Created on 14 déc. 2006
 */

package piconode.factory;

import java.util.ArrayList;

import piconode.core.arc.ArcForBackPropLearning;
import piconode.core.arc.WeightedArc;
import piconode.core.node.FeedForwardNeuralNetwork;
import piconode.core.node.FeedForwardNeuralNetworkForBackPropLearning;
import piconode.core.node.Neuron;
import piconode.core.node.NeuronForBackPropLearning;
import piconode.ext.ActivationFunction_HyperbolicTangent;
import piconode.ext.ActivationFunction_Linear;
import piconode.ext.ActivationFunction_LogisticSigmoid;
import piconode.toolbox.Tools;
import piconode.visualpiconode.Visualizer;

/**
 * Factory for building fully-connected multi-layer perceptrons (MLP) with a
 * bias node, optionally wired with back-propagation-capable neurons and arcs.
 * Also provides a simple supervised-learning driver ({@link #learn}).
 */
public class MultiLayerPerceptronFactory {

	/**
	 * Builds an MLP (with bias node) prepared for back-propagation learning.
	 *
	 * @param __nbin
	 *            number of input neurons
	 * @param __nbhidden
	 *            number of hidden neurons (must be strictly positive)
	 * @param __nbout
	 *            number of output neurons
	 * @param __activationFunction
	 *            true if logistic sigmoid (output: 0;1), false if tanh (output:
	 *            -1;1)
	 * @return the initialized network
	 */
	public static FeedForwardNeuralNetwork createPerceptron(int __nbin, int __nbhidden, int __nbout, boolean __activationFunction) {
		// learning is enabled by default
		return MultiLayerPerceptronFactory.createPerceptron(__nbin, __nbhidden, __nbout, __activationFunction, true);
	}

	/**
	 * Builds an MLP (with bias node).
	 *
	 * @param __nbin
	 *            number of input neurons
	 * @param __nbhidden
	 *            number of hidden neurons (must be strictly positive)
	 * @param __nbout
	 *            number of output neurons
	 * @param __activationFunction
	 *            true if logistic sigmoid (output: 0;1), false if tanh (output:
	 *            -1;1)
	 * @param __learning
	 *            enable back-propagation (uses the *ForBackPropLearning
	 *            network/neuron/arc variants)
	 * @return the initialized network
	 * @throws IllegalArgumentException
	 *             if __nbhidden is not strictly positive
	 */
	public static FeedForwardNeuralNetwork createPerceptron(int __nbin, int __nbhidden, int __nbout, boolean __activationFunction, boolean __learning) {

		if (__activationFunction == false) {
			System.out.println("[WARNING] hyperbolic tangent function seems to bug if backprop as of 20070719 (but ok nevertheless in other case such as ESN for e.g.).");
		}

		// Fail fast with an exception rather than System.exit(-1): killing the
		// JVM from a library factory is hostile to callers and untestable.
		// "<= 0" also catches negative values, which would previously have made
		// the "i != __nbhidden" construction loop spin indefinitely.
		if (__nbhidden <= 0) {
			throw new IllegalArgumentException("MultiLayerPerceptronFactory.createPerceptron(...) - cannot create a network with zero hidden node.");
		}

		// step 1 : create a network

		FeedForwardNeuralNetwork network;
		if (__learning == true)
			network = new FeedForwardNeuralNetworkForBackPropLearning(true);
		else
			network = new FeedForwardNeuralNetwork(true);

		// step 2 & 3 : create some neurons and register inputs/outputs.
		// Typed lists replace the original raw ArrayList usage, consistent
		// with the generics already used elsewhere in this file.

		ArrayList<Neuron> inputList = new ArrayList<Neuron>();
		for (int i = 0; i < __nbin; i++) {
			Neuron neuron;
			if (__learning == true)
				neuron = new NeuronForBackPropLearning((FeedForwardNeuralNetworkForBackPropLearning) network, new ActivationFunction_Linear(), "in(" + i + ")");
			else
				neuron = new Neuron(network, new ActivationFunction_Linear(), "in(" + i + ")");
			inputList.add(neuron);
			network.registerInputNeuron(neuron);
		}

		// Hidden neurons carry the caller-selected activation (sigmoid/tanh);
		// input and output neurons are linear.
		ArrayList<Neuron> hiddenList = new ArrayList<Neuron>();
		for (int i = 0; i < __nbhidden; i++) {
			if (__learning == true) {
				if (__activationFunction == true)
					hiddenList.add(new NeuronForBackPropLearning((FeedForwardNeuralNetworkForBackPropLearning) network, new ActivationFunction_LogisticSigmoid(), "hidden(" + i + ")"));
				else
					hiddenList.add(new NeuronForBackPropLearning((FeedForwardNeuralNetworkForBackPropLearning) network, new ActivationFunction_HyperbolicTangent(), "hidden(" + i + ")"));
			} else {
				if (__activationFunction == true)
					hiddenList.add(new Neuron(network, new ActivationFunction_LogisticSigmoid(), "hidden(" + i + ")"));
				else
					hiddenList.add(new Neuron(network, new ActivationFunction_HyperbolicTangent(), "hidden(" + i + ")"));
			}
		}

		ArrayList<Neuron> outputList = new ArrayList<Neuron>();
		for (int i = 0; i < __nbout; i++) {
			Neuron neuron;
			if (__learning == true)
				neuron = new NeuronForBackPropLearning((FeedForwardNeuralNetworkForBackPropLearning) network, new ActivationFunction_Linear(), "out(" + i + ")");
			else
				neuron = new Neuron(network, new ActivationFunction_Linear(), "out(" + i + ")");
			outputList.add(neuron);
			network.registerOutputNeuron(neuron);
		}

		// step 4 : create the topology (fully connected, layer to layer)

		connectLayers(network, inputList, hiddenList, __learning);
		connectLayers(network, hiddenList, outputList, __learning);

		// step 5 : initialize the network (perform some integrity checks and
		// internal encoding)

		network.initNetwork();

		// step 6 (optional) : set parameters for learning -- here we use
		// default parameters (all nodes are learning nodes + etalearningrate is
		// 1)

		return network;
	}

	/**
	 * Fully connects every neuron of __from to every neuron of __to with
	 * randomly-initialized weighted arcs (backprop-capable arcs if __learning).
	 */
	private static void connectLayers(FeedForwardNeuralNetwork __network, ArrayList<Neuron> __from, ArrayList<Neuron> __to, boolean __learning) {
		for (int i = 0; i < __from.size(); i++)
			for (int j = 0; j < __to.size(); j++)
				if (__learning == true)
					__network.registerArc(new ArcForBackPropLearning((NeuronForBackPropLearning) __from.get(i), (NeuronForBackPropLearning) __to.get(j), Tools.getArcWeightRandomInitValue(-1, 2)));
				else
					__network.registerArc(new WeightedArc(__from.get(i), __to.get(j), Tools.getArcWeightRandomInitValue(-1, 2)));
	}

	/**
	 * Manual smoke test: builds a small learning network, dumps its
	 * information and shows it in the visualizer.
	 */
	public static void main(String[] args) {
		FeedForwardNeuralNetworkForBackPropLearning net = (FeedForwardNeuralNetworkForBackPropLearning) createPerceptron(4, 6, 2, false, true);
		net.displayInformation();
		// return value (window status) intentionally ignored
		Visualizer.showNetwork(net);
	}

	/**
	 * Runs back-propagation over the given example set for __it full cycles.
	 *
	 * @param __network
	 *            the network to train (trained in place)
	 * @param __inputValuesList
	 *            input vectors of the learning set
	 * @param __outputValuesList
	 *            expected output vectors, parallel to __inputValuesList
	 * @param __it
	 *            number of learning cycles (total updates = size of data * __it)
	 * @param __display
	 *            print per-cycle error statistics to stdout
	 * @return mean squared error over all learning iterations (0 if there was
	 *         nothing to learn from)
	 */
	public static double learn(FeedForwardNeuralNetworkForBackPropLearning __network, ArrayList<double[]> __inputValuesList, ArrayList<double[]> __outputValuesList, int __it, boolean __display) {

		if (__display) {
			__network.displayInformation();
			System.out.println("\n\n###start learning###");
			System.out.println("# learning cycle # estimated squared error on learning set #");
		}

		double errorOnLearningSet = 0;

		int iterations = __it * __inputValuesList.size();

		// Guard against 0/0 (NaN) at the final averaging step when the data
		// set is empty or __it is zero.
		if (iterations == 0)
			return 0;

		for (int i = 0; i < __it; i++) // learn for __it cycles
		{
			// perform a learning cycle (all given examples are considered)
			for (int j = 0; j < __inputValuesList.size(); j++) {
				double[] inputValues = __inputValuesList.get(j);

				// forward pass, then accumulate error, then weight update
				__network.step(inputValues);

				double[] outputValues = __outputValuesList.get(j);

				double instantError = __network.estimateSquaredError(outputValues);
				errorOnLearningSet += instantError;

				__network.performBackPropagationLearning(outputValues);
			}

			if (__display) {
				// cumulative average error after (i+1) full cycles
				System.out.print((i + 1) * __inputValuesList.size() + " \t");
				System.out.print((errorOnLearningSet / ((i + 1) * __inputValuesList.size())) + " \n");
			}
		}

		// display estimated squared error
		errorOnLearningSet = errorOnLearningSet / iterations;
		if (__display) {
			System.out.print(errorOnLearningSet + " \t");
			System.out.print("\n");
			System.out.println("Stopped learning.\n");
		}

		return errorOnLearningSet;

	}

}
