/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package de.mlp_distributed.mlp.core;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.io.Writable;
import org.jfree.util.Log;

import de.mlp_distributed.mlp.classfier.AbstractVectorClassifier;
import de.mlp_distributed.mlp.learning.OnlineLearner;
import de.mlp_distributed.mlp.math.Factory;
import de.mlp_distributed.mlp.math.MatrixFactory;
import de.mlp_distributed.mlp.math.VectorFactory;
import de.mlp_distributed.mlp.math.function.DoubleDoubleFunction;
import de.mlp_distributed.mlp.math.function.Functions;
import de.mlp_distributed.mlp.math.mahout.Matrix;
import de.mlp_distributed.mlp.math.mahout.MatrixWritable;
import de.mlp_distributed.mlp.math.mahout.Vector;

/**
 * Multilayer Perceptron MLP
 * 
 * Implements an MLP with an arbitrary number of hidden layers. Only
 * neurons/units of adjacent layers are connected. In future: IO short cuts
 * for faster learning (of the linear model)?
 * 
 * Weights are stored as matrices. Each layer (except the output layer) has an
 * additional bias unit (+1). The forward pass can be done with: a^(l+1) =
 * g(W^(l) * a^(l)) = g(z^(l)), with a^(l): vector of the activations of the
 * units in layer l resp. l+1; W: weight matrix.
 * 
 */
public class MultiLayerPerceptron extends AbstractVectorClassifier implements OnlineLearner, Writable {

	// Factories used to construct all matrices/vectors of this network.
	MatrixFactory matrixFactory;
	VectorFactory vectorFactory;

	// Serialization format version, checked in readFields().
	public static final int WRITABLE_VERSION = 2;

	// the learning rate of the algorithm
	protected double learningRate = .1;

	// the regularization term, a positive number that controls the size of the
	// weight vector
	private double regularization = 0.01;

	// adagrad: base learning rate (eta) and the per-weight sum of squared
	// gradients; only used when useAdagrad is true
	private double eta = .1;
	private Matrix[] l2NormSum;
	private boolean useAdagrad = false;

	// momentum factor; a value of 0 disables the momentum term
	private double momentum = 0.8;

	// the number of layers including input and output layer
	protected int nbLayer;

	// weight matrices; weights[l] maps activations of layer l to layer l + 1
	protected Matrix[] weights;

	// Matrices for storing delta w of the previous step,
	// needed for the momentum term
	private Matrix[] oldWeightChange;

	// container for the activations set by the last forward pass
	protected Vector[] units;

	// number of units in each layer (incl. bias): input layer is layer 0
	protected int[] nbUnits;
	// Convenience: equals nbUnits[nbLayer - 1]
	private int nbOutputs;

	// true => softmax output + mutually exclusive cross-entropy cost
	private boolean mutuallyExclusiveClasses;

	// true => output deltas reduce to (output - target); see
	// getOutputDeltasForNaturalPairing()
	private boolean hasNaturalPairing = true;

	// the activity functions resp. gradients of each layer
	// squashingFunctions.length = nbLayer; for layer 0 this will not be used
	// so the same indices can be used as for the layers
	private Squashing squashingFunctions[];

	// private DoubleFunction activityFunctions[];
	// private DoubleFunction activityGradients[];
	// there is a natural pairing between the activity function of the output
	// units and the error function
	// DoubleFunction costFunction;

	protected CostFunction costFunction;
	// state for updateiWeightPlus(): per-weight step sizes and the last
	// applied weight change (resilient-propagation style bookkeeping)
	private Matrix[] updateValues;
	private Matrix[] lastWeightChange;
	// which weight-initialization scheme initWeightsRandomly() uses
	private boolean useNgWidrowRandomizer = false;
	private boolean useLorotBengioRandomizer = true;

	/**
	 * Builds an array of nbLayer - 1 zero matrices shaped like the weight
	 * matrices (rows = units of the upper layer, columns = units of the
	 * lower layer). Used for weights, weight changes, gradients etc.
	 * 
	 * @return freshly constructed matrices, one per weight layer
	 */
	protected Matrix[] getMatrixTopology() {
		final int nbWeightLayers = this.nbLayer - 1;
		final Matrix[] matrices = new Matrix[nbWeightLayers];
		for (int layer = 0; layer < nbWeightLayers; layer++) {
			matrices[layer] = this.matrixFactory.construct(this.nbUnits[layer + 1], this.nbUnits[layer]);
		}
		return matrices;
	}

	/**
	 * Builds an array of nbLayer zero vectors, one per layer, sized to hold
	 * that layer's activations, deltas etc.
	 * 
	 * @return freshly constructed vectors, one per layer
	 */
	private Vector[] getVectorTopology() {
		final Vector[] vectors = new Vector[this.nbLayer];
		for (int layer = 0; layer < this.nbLayer; layer++) {
			vectors[layer] = this.vectorFactory.construct(this.nbUnits[layer]);
		}
		return vectors;
	}

	/**
	 * Derives the remaining state (layer sizes, activation containers,
	 * momentum buffers) from the weight matrices. Used after
	 * deserialization, where only the weights and scalar settings are read.
	 */
	private void initResidue() {
		// set topology info: nbUnits[l] equals the column count of the
		// outgoing weight matrix of layer l (includes the bias unit)
		this.nbUnits = new int[this.nbLayer];
		for (int i = 0; i < (this.nbLayer - 1); i++) {
			this.nbUnits[i] = this.weights[i].numCols();
		}
		// the output layer has no outgoing weights; its size is the row
		// count of the last weight matrix
		this.nbUnits[this.nbLayer - 1] = this.weights[this.nbLayer - 2].numRows();
		this.nbOutputs = this.nbUnits[this.nbLayer - 1];

		// construction of the units
		this.units = this.getVectorTopology();
		if (this.momentum > 0) {
			this.oldWeightChange = this.getMatrixTopology();
		}
	}

	/**
	 * Sets up the topology: layer sizes (with one bias unit per non-output
	 * layer), weight matrices, activation containers and, if momentum is
	 * enabled, the buffers for the previous weight changes.
	 * 
	 * @param nbInputUnits number of input units without the bias unit
	 * @param nbOutputUnits number of output units (no bias at the output)
	 * @param nbHiddenUnits hidden units per hidden layer, without bias units
	 */
	private void init(final int nbInputUnits, final int nbOutputUnits, final int[] nbHiddenUnits) {

		this.nbLayer = nbHiddenUnits.length + 2;
		this.nbUnits = new int[this.nbLayer];

		// + 1 for the bias unit
		this.nbUnits[0] = nbInputUnits + 1;
		// no bias at output
		this.nbUnits[this.nbLayer - 1] = nbOutputUnits;
		this.nbOutputs = nbOutputUnits;
		for (int i = 1; i < (this.nbLayer - 1); i++) {
			this.nbUnits[i] = nbHiddenUnits[i - 1] + 1;
		}

		// construction of all weights
		this.weights = this.getMatrixTopology();

		// construction of the units
		this.units = this.getVectorTopology();
		if (this.momentum > 0) {
			this.oldWeightChange = this.getMatrixTopology();
		}
	}

	/**
	 * Internal constructor: only wires up the factories. The topology is set
	 * later by init() or readFields().
	 */
	private MultiLayerPerceptron() {
		super();
		this.matrixFactory = Factory.getInstance().getMatrixFactory();
		this.vectorFactory = Factory.getInstance().getVectorFactory();
	}

	/**
	 * Factory method: deserializes a complete MLP from the given input.
	 * 
	 * @param in source written by {@link #write(DataOutput)}
	 * @return the reconstructed perceptron
	 * @throws IOException on read errors or version mismatch
	 */
	public static MultiLayerPerceptron createMultiLayerPerceptron(final DataInput in) throws IOException {
		final MultiLayerPerceptron mlp = new MultiLayerPerceptron();
		mlp.readFields(in);
		return mlp;
	}

	/**
	 * Constructor for an MLP. Weights are not initialized before learning;
	 * they must be initialized by initWeightsRandomly().
	 * 
	 * @param nbInputUnits
	 *            number of input units (without the bias unit)
	 * @param nbOutputUnits
	 *            number of output units
	 * @param nbHiddenUnits
	 *            Array which holds the number of hidden units without counting
	 *            bias units, e.g. nbHiddenUnits[2]={6,4}; 6 hidden units in
	 *            layer 1 and 4 units in layer 2
	 * @param mutuallyExclusiveClasses
	 *            true for softmax output with mutually exclusive cross
	 *            entropy, false for independent sigmoid outputs
	 * @throws Exception
	 *             if nbHiddenUnits is null or empty
	 */
	public MultiLayerPerceptron(final int nbInputUnits, final int nbOutputUnits, final int[] nbHiddenUnits,
			final boolean mutuallyExclusiveClasses) throws Exception {
		this();
		if ((nbHiddenUnits == null) || (nbHiddenUnits.length == 0)) {
			throw new Exception("Number of hidden units is not allowed to be empty!");
		}
		this.mutuallyExclusiveClasses = mutuallyExclusiveClasses;
		this.init(nbInputUnits, nbOutputUnits, nbHiddenUnits);
		this.setDefaultClassificationActivities(mutuallyExclusiveClasses);
	}

	/**
	 * Constructor for an MLP with an explicit momentum. Weights are not
	 * initialized before learning; they must be initialized by
	 * initWeightsRandomly().
	 * 
	 * Momentum can be turned off by setting it to zero.
	 * 
	 * @param nbInputUnits number of input units (without the bias unit)
	 * @param nbOutputUnits number of output units
	 * @param nbHiddenUnits hidden units per hidden layer, without bias units
	 * @param mutuallyExclusiveClasses see the main constructor
	 * @param momentum momentum factor; 0 disables the momentum term
	 * @throws Exception if nbHiddenUnits is null or empty
	 */
	public MultiLayerPerceptron(final int nbInputUnits, final int nbOutputUnits, final int[] nbHiddenUnits,
			final boolean mutuallyExclusiveClasses, final double momentum) throws Exception {
		this(nbInputUnits, nbOutputUnits, nbHiddenUnits, mutuallyExclusiveClasses);
		this.momentum = momentum;
	}

	/**
	 * Installs the default activation and cost functions for classification:
	 * tanh in the hidden layers and, depending on the class structure,
	 * softmax with mutually exclusive cross entropy or sigmoid with
	 * independent cross entropy at the output. Index 0 (input layer) stays
	 * unset because the input layer has no squashing function.
	 */
	private void setDefaultClassificationActivities(final boolean mutuallyExclusiveClasses) {
		this.hasNaturalPairing = true;
		this.squashingFunctions = new Squashing[this.nbLayer];

		final int outputLayer = this.nbLayer - 1;
		for (int layer = 1; layer < outputLayer; layer++) {
			this.squashingFunctions[layer] = Squashing.TANH;
		}
		if (mutuallyExclusiveClasses) {
			this.squashingFunctions[outputLayer] = Squashing.SOFTMAX;
			this.costFunction = CostFunction.CROSS_ENTROPY_MUTUALLY_EXCUSIVE_OUTPUTS;
		} else {
			// output layer is sigmoid (logistic function)
			this.squashingFunctions[outputLayer] = Squashing.SIGMOID;
			this.costFunction = CostFunction.CROSS_ENTROPY_INDEPENDENT_OUTPUTS;
		}
	}

	/**
	 * Propagates the activations of one layer through the given weight
	 * matrix and applies the squashing function: a^(l+1) = g(W^(l) * a^(l)).
	 * For every layer except the output layer, unit 0 is the bias unit and
	 * its activation is pinned to 1 after squashing.
	 * 
	 * @param w weight matrix of the layer
	 * @param v activations of the lower layer
	 * @param squashing activation function of the upper layer
	 * @param layer index of the weight layer (0-based)
	 * @return activations of the upper layer
	 */
	private Vector forwardPass(final Matrix w, final Vector v, final Squashing squashing, final int layer) {
		final Vector activations = w.times(v);
		squashing.apply(activations);
		// unit 0 is the bias unit; set bias=1 except for the output layer
		final boolean isOutputLayer = layer == (this.nbLayer - 2);
		if (!isOutputLayer) {
			activations.setQuick(0, 1.0);
		}
		return activations;
	}

	/**
	 * Prepends a bias unit with activation 1 to the given vector.
	 * 
	 * @param v input activations without bias
	 * @return a new vector of size v.size() + 1 whose element 0 is 1
	 */
	private static Vector addBias(final Vector v) {
		final Vector withBias = Factory.getInstance().getVectorFactory().construct(v.size() + 1);
		withBias.setQuick(0, 1.0);
		for (int i = 0; i < v.size(); i++) {
			withBias.setQuick(i + 1, v.get(i));
		}
		return withBias;
	}

	/**
	 * Forward pass for prediction only (does not store the intermediate
	 * activations needed for learning).
	 * 
	 * @param input input pattern (without bias)
	 * @return activations of the output layer
	 */
	private Vector forwardPropagation(final Vector input) {
		Vector activations = MultiLayerPerceptron.addBias(input);
		// there are nbLayer - 1 weight matrices
		for (int layer = 0; layer < (this.nbLayer - 1); layer++) {
			activations = this.forwardPass(this.weights[layer], activations, this.squashingFunctions[layer + 1], layer);
		}
		return activations;
	}

	/**
	 * Forward pass for prediction only, with a linear output layer: the
	 * hidden layers use their configured squashing functions, but the link
	 * function of the output layer is replaced by the identity.
	 * 
	 * @param input input pattern (without bias)
	 * @return raw (un-squashed) output activations
	 */
	private Vector forwardPropagationNoLink(final Vector input) {
		Vector activations = MultiLayerPerceptron.addBias(input);
		final int lastWeightLayer = this.nbLayer - 2;
		for (int layer = 0; layer < lastWeightLayer; layer++) {
			activations = this.forwardPass(this.weights[layer], activations, this.squashingFunctions[layer + 1], layer);
		}
		return this.forwardPass(this.weights[lastWeightLayer], activations, Squashing.LINEAR, lastWeightLayer);
	}

	/**
	 * Forward pass that additionally stores the activations of every layer
	 * in {@code units}, as required for backpropagation.
	 * 
	 * @param input input pattern (without bias)
	 * @return activations of the output layer
	 */
	private Vector setUnitsWithForwardPropagation(final Vector input) {
		Vector activations = MultiLayerPerceptron.addBias(input);
		this.units[0] = activations;
		for (int layer = 0; layer < (this.nbLayer - 1); layer++) {
			activations = this.forwardPass(this.weights[layer], activations, this.squashingFunctions[layer + 1], layer);
			this.units[layer + 1] = activations;
		}
		return activations;
	}

	/**
	 * Backward pass for one layer: computes the deltas (errors) of layer l
	 * from the deltas of layer l + 1 via
	 * delta_l = g'(z_l) .* (W_l^T * delta_{l+1}).
	 * 
	 * @param wt
	 *            transpose of the weight matrix of layer l
	 * @param delta
	 *            vector of deltas of layer l + 1
	 * @param l
	 *            index of the layer whose deltas are computed
	 * @return vector of deltas of layer l
	 */
	private Vector getDeltaWithBackwardPass(final Matrix wt, final Vector delta, final int l) {
		final Vector backpropagated = wt.times(delta);
		// element-wise multiply with the activation derivative:
		// d = g'(z_j) * w^t * delta_k
		final Vector activationDerivatives = this.units[l].clone();
		this.squashingFunctions[l].applyGradient(activationDerivatives);
		backpropagated.assign(activationDerivatives, Functions.MULT);
		return backpropagated;
	}

	/**
	 * Can be used only if the natural pairing between cost function and
	 * output unit activation function is valid. Then there is a simple
	 * formula for the deltas (errors) d of the output neurons: d = y - t,
	 * with y: output activity and t: target value. For the natural pairing
	 * (conjugate link functions) see e.g. C. Bishop:
	 * "Pattern Recognition and Machine Learning", chapter 5.2, or more
	 * comprehensive in C. Bishop: "Neural Networks for Pattern Recognition",
	 * chapter 6.
	 * 
	 * @param targets target values t
	 * @return deltas of the output layer
	 */
	private Vector getOutputDeltasForNaturalPairing(final Vector targets) {
		final Vector deltas = this.units[this.nbLayer - 1].clone();
		deltas.assign(targets, Functions.MINUS);
		return deltas;
	}

	/**
	 * Output-layer deltas; currently only implemented for the natural
	 * pairing of cost function and output activation function.
	 * 
	 * @throws UnsupportedOperationException if there is no natural pairing
	 */
	private Vector getOutputDeltas(final Vector targets) {
		if (!this.hasNaturalPairing) {
			// could be implemented for special cases
			throw new UnsupportedOperationException();
		}
		return this.getOutputDeltasForNaturalPairing(targets);
	}

	/**
	 * Gets the derivative of the cost for the given data pattern without
	 * regularization (standard backpropagation).
	 * 
	 * @param input the input pattern
	 * @param target the should-be output pattern
	 * @return the derivative of the total cost dE/dw for all weights w
	 */
	public Matrix[] getDerivativeOfTheCostWithoutRegularization(final Vector input, final Vector target) {
		// Optimization-TODO: check if initialization is expensive
		final Vector[] delta = this.getVectorTopology();

		// 1) forward pass to find the activations of all hidden and output
		// units
		this.setUnitsWithForwardPropagation(input);

		// 2) evaluate the deltas for the output units
		delta[this.nbLayer - 1] = this.getOutputDeltas(target);

		// 3) backpropagate the deltas to obtain the deltas for the hidden
		// units; for the input units there are no deltas => layer > 0
		for (int layer = this.nbLayer - 2; layer > 0; layer--) {
			final Matrix transposed = this.weights[layer].transpose();
			delta[layer] = this.getDeltaWithBackwardPass(transposed, delta[layer + 1], layer);
		}

		// 4) set dE/dw_ij^(l) = delta_i * activations_j
		final Matrix[] costDerivative = this.getMatrixTopology();
		for (int layer = 0; layer < (this.nbLayer - 1); layer++) {
			// the cross product of mahout is the outer product
			costDerivative[layer] = delta[layer + 1].cross(this.units[layer]);
		}
		return costDerivative;
	}

	/**
	 * Zeros the first row of the given weight-change matrix, i.e. the
	 * changes of the weights leading into the bias unit of the upper layer
	 * (its activation is fixed to 1 and must not be trained). Not applied to
	 * the output layer, which has no bias unit.
	 */
	private static void setWeightChangeToBiasZero(final Matrix m) {
		final int cols = m.numCols();
		for (int col = 0; col < cols; col++) {
			m.setQuick(0, col, 0.0);
		}
	}

	/**
	 * Performs one online training step: backpropagation followed by a
	 * weight update with regularization, learning rate and momentum.
	 * 
	 * @param input input pattern
	 * @param target target pattern
	 * @return output vector of the forward pass for this input
	 */
	public Vector trainOnline(final Vector input, final Vector target) {
		final Matrix[] gradients = this.getDerivativeOfTheCostWithoutRegularization(input, target);
		this.assignRegularizationAndLearningrate(gradients);

		// units still hold the activations of the forward pass; return the
		// output layer
		return this.units[this.nbLayer - 1];
	}

	/**
	 * Accumulates the squared gradient of every weight (AdaGrad
	 * bookkeeping). The sums are lazily initialized on the first call.
	 */
	private void computeL2NormSum(final Matrix[] gt) {
		if (this.l2NormSum == null) {
			this.l2NormSum = this.getMatrixTopology();
		}
		for (int layer = 0; layer < gt.length; layer++) {
			final Matrix gradient = gt[layer];
			final Matrix sum = this.l2NormSum[layer];
			for (int row = 0; row < gradient.rowSize(); row++) {
				for (int col = 0; col < gradient.columnSize(); col++) {
					final double squared = Math.pow(gradient.get(row, col), 2);
					sum.set(row, col, sum.get(row, col) + squared);
				}
			}
		}
	}

	/**
	 * AdaGrad learning rate for a single weight: eta divided by the root of
	 * the accumulated squared gradients of that weight.
	 * 
	 * @param layer weight layer index
	 * @param row row in the weight matrix
	 * @param column column in the weight matrix
	 * @return the per-weight learning rate
	 */
	private double getAdagradLearningrate(final int layer, final int row, final int column) {
		double l2normValue = this.l2NormSum[layer].get(row, column);
		// treat tiny sums as zero to avoid dividing by a near-zero root
		if (Math.abs(l2normValue) < 0.0000001) {
			l2normValue = 0;
		}
		// no accumulated gradient yet: fall back to the plain rate eta
		if (l2normValue <= 0) {
			l2normValue = 1;
		}
		final double root = Math.sqrt(l2normValue);
		final double result = this.eta / root;
		return result;
	}

	/**
	 * Applies regularization, the learning rate (plain or AdaGrad) and the
	 * momentum term to the given gradients and adds the resulting weight
	 * changes to the weights. The gradients array itself is not modified;
	 * the method works on clones.
	 * 
	 * @param gradients dE/dw per weight layer, as produced by
	 *            getDerivativeOfTheCostWithoutRegularization()
	 */
	public void assignRegularizationAndLearningrate(final Matrix[] gradients) {
		final Matrix[] copy = new Matrix[gradients.length];
		for (int i = 0; i < gradients.length; i++) {
			copy[i] = gradients[i].clone();
		}

		if (this.useAdagrad) {
			this.computeL2NormSum(copy);
		}

		for (int layer = 0; layer < (this.nbLayer - 1); layer++) {
			// add the regularization term: gradient + lambda * w
			copy[layer].assign(this.weights[layer], new DoubleDoubleFunction() {
				@Override
				public double apply(final double a, final double b) {
					return a + (MultiLayerPerceptron.this.regularization * b);
				}
			});
			if (layer != (this.nbLayer - 2)) { // not to last output unit
				MultiLayerPerceptron.setWeightChangeToBiasZero(copy[layer]);
			}
			Matrix wC;
			if (!this.useAdagrad) {
				// plain gradient descent step: -learningRate * gradient
				wC = copy[layer].times(-1.0 * this.learningRate);
			} else {
				// AdaGrad: individual learning rate per weight
				wC = copy[layer];
				for (int i = 0; i < wC.rowSize(); i++) {
					for (int j = 0; j < wC.columnSize(); j++) {
						final double learningrate = this.getAdagradLearningrate(layer, i, j);
						final double value = -1 * wC.get(i, j) * learningrate;
						if (!Double.isNaN(value) && !Double.isInfinite(value)) {
							wC.set(i, j, value);
						} else {
							// keep the old entry rather than storing NaN/Inf
							Log.error("Weight update is going till infinity.");
						}
					}
				}
			}
			if (this.momentum > 0) {
				// momentum term: add momentum * previous weight change
				Matrix m = this.oldWeightChange[layer].clone();
				m = m.times(this.momentum);
				wC = wC.plus(m);
				// change weights according to weightChangeMatrix
				this.oldWeightChange[layer] = wC;
			}
			this.weights[layer].assign(wC, Functions.PLUS);
		}
	}

	/**
	 * Batch weight update in the style of resilient propagation: every
	 * weight gets an individual step size that is adapted from the sign
	 * changes of its gradient (see the per-weight overload). Lazily
	 * initializes the per-weight step sizes and bookkeeping matrices on the
	 * first call.
	 * 
	 * @param gradients accumulated gradients per weight layer; modified in
	 *            place (the regularization term is added)
	 * @param currentError error of the current epoch
	 * @param lastError error of the previous epoch
	 */
	public void updateiWeightPlus(final Matrix[] gradients, final double currentError, final double lastError) {
		if (this.updateValues == null) {
			final double DEFAULT_INITIAL_UPDATE = 0.1;
			// Initialize Arrays
			if (this.oldWeightChange == null) {
				this.oldWeightChange = this.getMatrixTopology();
			}
			if (this.lastWeightChange == null) {
				this.lastWeightChange = this.getMatrixTopology();
			}

			this.updateValues = this.getMatrixTopology();
			for (int layer = 0; layer < (this.nbLayer - 1); layer++) {
				final Matrix updateValueMatrix = this.updateValues[layer];
				for (int i = 0; i < updateValueMatrix.rowSize(); i++) {
					for (int j = 0; j < updateValueMatrix.columnSize(); j++) {
						updateValueMatrix.set(i, j, DEFAULT_INITIAL_UPDATE);
					}
				}
			}
		}
		for (int layer = 0; layer < (this.nbLayer - 1); layer++) {
			final Matrix gradient = gradients[layer];

			// Regularization: gradient + lambda * w
			gradient.assign(this.weights[layer], new DoubleDoubleFunction() {
				@Override
				public double apply(final double a, final double b) {
					return a + (MultiLayerPerceptron.this.regularization * b);
				}
			});
			for (int i = 0; i < gradient.rowSize(); i++) {
				if ((layer == 0) && (i == 0)) {
					// No inputs for bias of hidden layer. This row contains
					// only zeros.
					continue;
				}
				for (int j = 0; j < gradient.columnSize(); j++) {
					final double deltaW = this.updateiWeightPlus(gradient, this.oldWeightChange[layer], layer, i, j, currentError,
							lastError);
					final double w = this.weights[layer].get(i, j);
					this.weights[layer].set(i, j, w + deltaW);
				}
			}
		}
	}

	/**
	 * Sign function with a dead zone: returns 0 for values whose absolute
	 * value is below 1e-17, otherwise -1 or +1.
	 */
	private int sign(final double value) {
		if (Math.abs(value) < 0.00000000000000001) {
			return 0;
		}
		return (value > 0) ? 1 : -1;
	}

	/**
	 * Per-weight step of the resilient-propagation style update. The sign of
	 * gradient * lastGradient decides: same sign => grow the step size and
	 * step against the gradient; sign flip => shrink the step size and, if
	 * the error increased, revert the previous weight change
	 * (NOTE(review): this backtracking matches the iRprop+ variant — confirm
	 * against the intended algorithm); zero => step with the unchanged size.
	 * 
	 * @param gradients current gradients of this layer
	 * @param lastGradient gradients of the previous step; updated in place
	 * @param layer weight layer index
	 * @param i row in the weight matrix
	 * @param j column in the weight matrix
	 * @param currentError error of the current epoch
	 * @param lastError error of the previous epoch
	 * @return the weight change to apply to weight (i, j)
	 */
	private double updateiWeightPlus(final Matrix gradients, final Matrix lastGradient, final int layer, final int i, final int j,
			final double currentError, final double lastError) {
		final double POSITIVE_ETA = 1.2;
		final double NEGATIVE_ETA = 0.5;
		final double DELTA_MIN = 1e-6;
		final double DEFAULT_MAX_STEP = 50;

		final int change = this.sign(gradients.get(i, j) * lastGradient.get(i, j));
		double weightChange = 0;

		if (change > 0) {
			// gradient kept its sign: accelerate, capped at the max step
			double delta = this.updateValues[layer].get(i, j) * POSITIVE_ETA;
			delta = Math.min(delta, DEFAULT_MAX_STEP);
			weightChange = -this.sign(gradients.get(i, j)) * delta;
			this.updateValues[layer].set(i, j, delta);
			lastGradient.set(i, j, gradients.get(i, j));
		} else if (change < 0) {
			// sign flip: we overshot a minimum; shrink the step size
			double delta = this.updateValues[layer].get(i, j) * NEGATIVE_ETA;
			delta = Math.max(delta, DELTA_MIN);
			this.updateValues[layer].set(i, j, delta);

			// revert the last change only if the error got worse
			if (currentError > lastError) {
				weightChange = -this.lastWeightChange[layer].get(i, j);
			}

			// zero the stored gradient so the next step takes the
			// change == 0 branch instead of flipping again
			lastGradient.set(i, j, 0);
		} else if (change == 0) {
			final double delta = this.updateValues[layer].get(i, j);
			weightChange = -this.sign(gradients.get(i, j)) * delta;
			lastGradient.set(i, j, gradients.get(i, j));
		}
		this.lastWeightChange[layer].set(i, j, weightChange);
		return weightChange;
	}

	/**
	 * Random initialization of the weight matrices. Delegates to the
	 * Nguyen-Widrow or Glorot-Bengio randomizer if one of them is enabled
	 * (Nguyen-Widrow takes precedence); otherwise uses the scheme of 4.6 of
	 * Yann LeCun et al.: "Efficient BackProp".
	 * 
	 * @param gen source of randomness
	 */
	public void initWeightsRandomly(final Random gen) {
		if (this.useNgWidrowRandomizer) {
			this.initWeightsWithNguyenWidrowRandomizer(gen);
			return;
		}

		if (this.useLorotBengioRandomizer) {
			this.initWeightsWithGlorotBengioRandomizer(gen);
			return;
		}

		for (int l = 0; l < (this.nbLayer - 1); l++) {
			final int fanIn = this.nbUnits[l];
			final int fanOut = this.nbUnits[l + 1];

			for (int i = 0; i < fanOut; i++) {
				for (int j = 0; j < fanIn; j++) {
					double w = 0;
					// weights to bias units have to be zero! not for outputs
					// (no output bias)!

					if ((i != 0) || (l == (this.nbLayer - 2))) {
						// the standard derivation s of a uniform distribution
						// [-a,a] is 1/sqrt(3) * a
						// s_should = 1 / sqrt(fanIn) = 1 / sqrt(3) * a => a =
						// sqrt(3)/sqrt(fanIn)
						w = 1.73d / Math.sqrt(fanIn);
						w = ((2.0 * gen.nextDouble()) - 1.0) * w;
					}
					this.weights[l].setQuick(i, j, w);
				}
			}
		}
	}

	/**
	 * Weight initialization after the paper: Understanding the difficulty of
	 * training deep feedforward neural networks, by Xavier Glorot and Yoshua
	 * Bengio (DIRO, Universite de Montreal, Montreal, Quebec, Canada).
	 * http://deeplearning.net/tutorial/mlp.html
	 * 
	 * Draws uniformly from [-a, a] with a = sqrt(6) / sqrt(fanIn + fanOut).
	 * NOTE(review): the extra factor 1.5 below widens the interval beyond
	 * the published formula — confirm this is intentional.
	 * 
	 * @param gen source of randomness
	 */
	private void initWeightsWithGlorotBengioRandomizer(final Random gen) {
		for (int l = 0; l < (this.nbLayer - 1); l++) {
			final int fanIn = this.nbUnits[l];
			final int fanOut = this.nbUnits[l + 1];
			final double distValue = Math.sqrt(6) / Math.sqrt(fanOut + fanIn);

			for (int i = 0; i < fanOut; i++) {
				for (int j = 0; j < fanIn; j++) {
					double w = 0;
					// weights to bias units have to be zero! not for outputs
					// (no output bias)!

					if ((i != 0) || (l == (this.nbLayer - 2))) {
						w = (2 * distValue * gen.nextDouble()) - distValue;
						w = w * 1.5;
					}
					this.weights[l].setQuick(i, j, w);
				}
			}
		}
	}

	/**
	 * Nguyen-Widrow style weight initialization: draw uniformly from [-1, 1)
	 * and then rescale each row to the length beta.
	 * NOTE(review): beta is computed from the first hidden/input layer only
	 * and applied to all layers; if a row norm n is exactly 0, the rescaling
	 * divides by zero — confirm both are acceptable.
	 * 
	 * @param gen source of randomness
	 */
	private void initWeightsWithNguyenWidrowRandomizer(final Random gen) {
		final double beta = 0.7 * Math.pow(this.nbUnits[1] + 1, 1.0 / (this.nbUnits[0] + 1));

		for (int layer = 0; layer < this.weights.length; layer++) {
			final Matrix weightMatrix = this.weights[layer];
			for (int i = 0; i < weightMatrix.rowSize(); i++) {
				/**
				 * initialize weights randomly; weights into bias units stay
				 * zero except in the output layer (which has no bias).
				 */
				for (int j = 0; j < weightMatrix.columnSize(); j++) {
					double w = 0;
					if ((i != 0) || (layer == (this.weights.length - 1))) {
						/**
						 * general rule: (max - min) * randomNumber + min. The
						 * following is only suitable for tanh as activation
						 * function.
						 */
						w = (2 * gen.nextDouble()) - 1;
					}
					weightMatrix.setQuick(i, j, w);
				}

				/**
				 * calculate n, the Euclidean norm of the row
				 */
				double n = 0.0;
				for (int j = 0; j < weightMatrix.columnSize(); j++) {
					final double w = weightMatrix.getQuick(i, j);
					n += w * w;
				}
				n = Math.sqrt(n);

				/**
				 * change w: rescale the row to length beta
				 */
				for (int j = 0; j < weightMatrix.columnSize(); j++) {
					if ((i != 0) || (layer == (this.weights.length - 1))) {
						double w = weightMatrix.getQuick(i, j);
						w = (beta * w) / n;
						weightMatrix.setQuick(i, j, w);
					}
				}

			}
		}
	}

	/**
	 * Replaces the weight matrix of one layer with a clone of w.
	 * 
	 * @throws IllegalArgumentException if the dimensions do not match
	 */
	private void initWeights(final Matrix w, final int layer) {
		final Matrix current = this.weights[layer];
		final boolean sameShape = (w.numCols() == current.numCols()) && (w.numRows() == current.numRows());
		if (!sameShape) {
			throw new IllegalArgumentException();
		}
		this.weights[layer] = w.clone();
	}

	/**
	 * Replaces all weight matrices with clones of the given ones.
	 * 
	 * @throws IllegalArgumentException if the number of matrices or any
	 *             matrix dimension does not match the network topology
	 */
	public void initWeights(final Matrix[] w) {
		if (w.length != this.weights.length) {
			throw new IllegalArgumentException();
		}
		for (int layer = 0; layer < w.length; layer++) {
			this.initWeights(w[layer], layer);
		}
	}

	/**
	 * @return the internal weight matrices (NOT a copy — callers can mutate
	 *         the network state through the returned array)
	 */
	public Matrix[] getWeights() {
		return this.weights;
	}

	/**
	 * Chainable configuration option.
	 * 
	 * @param learningRate1
	 *            New value of the initial learning rate.
	 * @return This, so other configurations can be chained.
	 */
	public MultiLayerPerceptron learningRate(final double learningRate1) {
		this.learningRate = learningRate1;
		return this;
	}

	/**
	 * Chainable configuration option.
	 * 
	 * @param regularization1
	 *            A positive value that controls the weight vector size.
	 * @return This, so other configurations can be chained.
	 */
	public MultiLayerPerceptron regularization(final double regularization1) {
		this.regularization = regularization1;
		return this;
	}

	/**
	 * Chainable configuration option.
	 * 
	 * @param momentum1
	 *            A positive value that controls the momentum; 0 disables it.
	 * @return This, so other configurations can be chained.
	 */
	public MultiLayerPerceptron momentum(final double momentum1) {
		this.momentum = momentum1;
		return this;
	}

	/**
	 * Creates a structurally identical copy of this MLP (same topology,
	 * hyperparameters, weights and functions).
	 * 
	 * @return the copy
	 * @throws Exception propagated from the constructor
	 */
	public MultiLayerPerceptron copy() throws Exception {
		this.close();
		// reconstruct the hidden layer sizes without the bias units
		final int[] nbHiddenUnits = new int[this.nbUnits.length - 2];
		for (int i = 0; i < nbHiddenUnits.length; i++) {
			nbHiddenUnits[i] = this.nbUnits[i + 1] - 1;
		}
		final MultiLayerPerceptron mlp = new MultiLayerPerceptron(this.nbUnits[0] - 1, this.nbOutputs, nbHiddenUnits,
				this.mutuallyExclusiveClasses);
		mlp.copyFrom(this);
		return mlp;
	}

	/**
	 * Number of categories this classifier distinguishes; a single output
	 * unit encodes a binary decision, hence 2.
	 */
	@Override
	public int numCategories() {
		// TODO : check what numCategories means and fix this
		if (this.nbOutputs == 1) {
			return 2;
		}
		return this.nbOutputs;
	}

	/**
	 * Debug helper: prints every matrix of the array to stdout.
	 */
	private void printMatrices(final Matrix ma[]) {
		for (int index = 0; index < ma.length; index++) {
			System.out.println("matrix " + index);
			this.printMatrix(ma[index]);
		}
	}

	/**
	 * Debug helper: prints each row of the matrix to stdout.
	 */
	private void printMatrix(final Matrix m) {
		for (int j = 0; j < m.numRows(); j++) {
			// BUGFIX: the label said "col" although rows are printed
			System.out.println("row " + j + ":" + m.viewRow(j));
		}
	}

	/**
	 * Gets the cost of the error function for one pattern, without the
	 * regularization cost.
	 * 
	 * @param output network output
	 * @param target target values
	 * @return cost function value
	 */
	public double getCost(final Vector output, final Vector target) {
		return this.costFunction.getCost(output, target);
	}

	/**
	 * Computes the activations of all output units for the given instance
	 * via a plain forward pass.
	 */
	@Override
	public Vector classifyFull(final Vector instance) {
		return this.forwardPropagation(instance);
	}

	/**
	 * Classifies an instance. For mutually exclusive classes the returned
	 * vector holds the activations of the first nbOutputs - 1 output units
	 * (the softmax outputs sum to 1, so the last one is redundant). For a
	 * single independent output the raw output vector is returned; other
	 * configurations are not supported.
	 * 
	 * @param instance feature vector (without bias)
	 * @return classification scores as described above
	 * @throws UnsupportedOperationException for multiple independent outputs
	 */
	@Override
	public Vector classify(final Vector instance) {
		final int nbOutput = this.nbOutputs;
		if (this.mutuallyExclusiveClasses) {
			final Vector out = this.vectorFactory.construct(nbOutput - 1);
			// assumes that outputs sum to 1:
			final Vector outputUnits = this.forwardPropagation(instance);
			for (int i = 0; i < (nbOutput - 1); i++) {
				out.setQuick(i, outputUnits.get(i));
			}
			// BUGFIX: the computed result was discarded and this branch
			// always fell through to UnsupportedOperationException
			return out;
		}
		if (nbOutput == 1) {
			return this.forwardPropagation(instance);
		}
		throw new UnsupportedOperationException();
	}

	/**
	 * Like classifyFull, but the link function of the output layer is
	 * skipped (linear output).
	 */
	@Override
	public Vector classifyNoLink(final Vector instance) {
		return this.forwardPropagationNoLink(instance);
	}

	/**
	 * Classifies the instance and returns the activation of the first
	 * output unit.
	 */
	@Override
	public double classifyScalar(final Vector instance) {
		return this.classifyFull(instance).get(0);
	}

	/**
	 * Copies hyperparameters, weights, activation functions and the cost
	 * function from another MLP into this one.
	 * 
	 * @param other source network; must have the same layer sizes
	 */
	public void copyFrom(final MultiLayerPerceptron other) {

		this.learningRate = other.learningRate;
		this.regularization = other.regularization;
		this.momentum = other.momentum;
		this.mutuallyExclusiveClasses = other.mutuallyExclusiveClasses;
		this.hasNaturalPairing = other.hasNaturalPairing;
		for (int i = 0; i < (this.nbLayer - 1); i++) {
			this.weights[i] = other.weights[i].clone();
		}
		for (int i = 0; i < this.nbLayer; i++) {
			this.squashingFunctions[i] = other.squashingFunctions[i];
		}
		this.costFunction = other.costFunction;

	}

	/**
	 * Serializes this MLP. The field order is a contract with readFields():
	 * version, scalar hyperparameters, flags, nbLayer, the nbLayer - 1
	 * weight matrices, the squashing function names of layers 1..nbLayer-1
	 * (layer 0 has none), the cost function name, and the AdaGrad settings.
	 */
	@Override
	public void write(final DataOutput out) throws IOException {
		out.writeInt(MultiLayerPerceptron.WRITABLE_VERSION);
		out.writeDouble(this.learningRate);
		out.writeDouble(this.regularization);
		out.writeDouble(this.momentum);
		out.writeBoolean(this.mutuallyExclusiveClasses);
		out.writeBoolean(this.hasNaturalPairing);
		out.writeInt(this.nbLayer);
		for (int i = 0; i < (this.nbLayer - 1); i++) {
			MatrixWritable.writeMatrix(out, this.weights[i]);
		}
		for (int i = 1; i < this.nbLayer; i++) {
			out.writeUTF(this.squashingFunctions[i].name());
		}
		out.writeUTF(this.costFunction.name());
		out.writeBoolean(this.useAdagrad);
		out.writeDouble(this.eta);
	}

	/**
	 * Deserializes an MLP written by {@link #write(DataOutput)}; the read
	 * order must mirror the write order exactly. The topology-dependent
	 * state is rebuilt afterwards via initResidue().
	 * 
	 * @throws IOException on read errors or if the version does not match
	 *             WRITABLE_VERSION
	 */
	@Override
	public void readFields(final DataInput in) throws IOException {
		final int version = in.readInt();
		if (version == MultiLayerPerceptron.WRITABLE_VERSION) {
			this.learningRate = in.readDouble();
			this.regularization = in.readDouble();
			this.momentum = in.readDouble();
			this.mutuallyExclusiveClasses = in.readBoolean();
			this.hasNaturalPairing = in.readBoolean();
			this.nbLayer = in.readInt();
			this.weights = new Matrix[this.nbLayer - 1];
			for (int i = 0; i < (this.nbLayer - 1); i++) {
				this.weights[i] = MatrixWritable.readMatrix(in);
			}
			// layer 0 (input) has no squashing function and stays null
			this.squashingFunctions = new Squashing[this.nbLayer];
			for (int i = 1; i < this.nbLayer; i++) {
				final String squashingString = in.readUTF();
				this.squashingFunctions[i] = Squashing.valueOf(squashingString);
			}
			final String costString = in.readUTF();
			this.costFunction = CostFunction.valueOf(costString);
			this.useAdagrad = in.readBoolean();
			this.eta = in.readDouble();

			// initialize the rest from the information above
			this.initResidue();
		} else {
			throw new IOException("Incorrect object version, wanted " + MultiLayerPerceptron.WRITABLE_VERSION + " got " + version);
		}
	}

	/**
	 * No resources to release.
	 */
	@Override
	public void close() {
		// At moment this is an online classifier, nothing to do.
		// For batch learning: TODO
	}

	/**
	 * Trains on a single pattern (OnlineLearner API). The class label is
	 * used as the single target value; trackingKey and groupKey are ignored.
	 */
	@Override
	public void train(final long trackingKey, final String groupKey, final int actual, final Vector instance) {
		// training with one pattern
		final Vector target = this.vectorFactory.construct(1);
		target.setQuick(0, actual);
		this.trainOnline(instance, target);
	}

	/** Convenience overload without a group key. */
	@Override
	public void train(final long trackingKey, final int actual, final Vector instance) {
		this.train(trackingKey, null, actual, instance);
	}

	/** Convenience overload without tracking or group key. */
	@Override
	public void train(final int actual, final Vector instance) {
		this.train(0, null, actual, instance);
	}

	/** @return the number of layers including input and output layer */
	public int getNumberOfLayers() {
		return this.nbLayer;
	}

	/** @return the number of input units, without the bias unit */
	public int getNumberOfInputUnits() {
		return this.nbUnits[0] - 1;
	}

	/** @return the number of output units (the output layer has no bias) */
	public int getNumberOfOutputUnits() {
		return this.nbUnits[this.nbUnits.length - 1];
	}

	/**
	 * 
	 * @param layer
	 *            Number of hidden layer starting with one for the first hidden
	 *            layer.
	 * @return Number of hidden units, without the bias unit.
	 *         NOTE(review): passing the output layer index returns
	 *         nbOutputs - 1 rather than 0 — confirm callers never do this.
	 */
	public int getNumberOfHiddenUnits(final int layer) {
		if (layer >= this.nbUnits.length) {
			return 0;
		}
		return this.nbUnits[layer] - 1;
	}

	/** @return the AdaGrad base learning rate eta */
	public double getEta() {
		return this.eta;
	}

	/** Sets the AdaGrad base learning rate eta. */
	public void setEta(final double eta) {
		this.eta = eta;
	}

	/** @return whether AdaGrad per-weight learning rates are used */
	public boolean isUseAdagrad() {
		return this.useAdagrad;
	}

	/** Enables or disables AdaGrad per-weight learning rates. */
	public void setUseAdagrad(final boolean useAdagrad) {
		this.useAdagrad = useAdagrad;
	}

	/** @return whether the Nguyen-Widrow weight initializer is used */
	public boolean isUseNgWidrowRandomizer() {
		return this.useNgWidrowRandomizer;
	}

	/** Selects the Nguyen-Widrow weight initializer (highest precedence). */
	public void setUseNgWidrowRandomizer(final boolean useNgWidrowRandomizer) {
		this.useNgWidrowRandomizer = useNgWidrowRandomizer;
	}

	/** @return whether the Glorot-Bengio weight initializer is used */
	public boolean isUseLorotBengioRandomizer() {
		return this.useLorotBengioRandomizer;
	}

	/** Selects the Glorot-Bengio weight initializer. */
	public void setUseLorotBengioRandomizer(final boolean useLorotBengioRandomizer) {
		this.useLorotBengioRandomizer = useLorotBengioRandomizer;
	}
}
