package de.mlp_distributed.mlp.core;

import java.util.Random;

import de.mlp_distributed.mlp.math.function.Functions;
import de.mlp_distributed.mlp.math.mahout.Matrix;
import de.mlp_distributed.mlp.math.mahout.Vector;

/**
 * Only for testing purposes!
 * 
 * @author twist
 * 
 */
/**
 * Deterministic stand-in for {@link MultiLayerPerceptron}, intended only for
 * testing: all weights and gradients are filled with the constant 1,
 * classification is the identity function, and the cost is always 1.0. This
 * makes forward/backward bookkeeping in tests fully reproducible.
 *
 * @author twist
 */
public class MockMultiLayerPerceptron extends MultiLayerPerceptron {

	public MockMultiLayerPerceptron(final int nbInputUnits, final int nbOutputUnits, final int[] nbHiddenUnits,
			final boolean mutuallyExclusiveClasses) throws Exception {
		super(nbInputUnits, nbOutputUnits, nbHiddenUnits, mutuallyExclusiveClasses);
	}

	/**
	 * Ignores the supplied generator and deterministically sets every weight
	 * to 1, so tests are not affected by random initialization.
	 */
	@Override
	public void initWeightsRandomly(final Random gen) {
		for (final Matrix w : this.weights) {
			fillWithOnes(w);
		}
	}

	/**
	 * Ignores {@code input} and {@code target}; returns gradient matrices
	 * shaped like the network's weight topology with every entry set to 1.
	 */
	@Override
	public Matrix[] getDerivativeOfTheCostWithoutRegularization(final Vector input, final Vector target) {
		final Matrix[] costDerivative = this.getMatrixTopology();
		for (final Matrix w : costDerivative) {
			fillWithOnes(w);
		}
		return costDerivative;
	}

	/**
	 * Applies neither regularization nor a learning rate: each gradient
	 * matrix is simply added element-wise to the corresponding weight matrix.
	 */
	@Override
	public void assignRegularizationAndLearningrate(final Matrix[] gradients) {
		for (int i = 0; i < this.weights.length; i++) {
			this.weights[i].assign(gradients[i], Functions.PLUS);
		}
	}

	/** Identity classification: returns {@code instance} unchanged. */
	@Override
	public Vector classifyFull(final Vector instance) {
		return instance;
	}

	/** Constant cost of 1.0, independent of {@code output} and {@code target}. */
	@Override
	public double getCost(final Vector output, final Vector target) {
		return 1.0;
	}

	/** Overwrites every entry of {@code m} with the constant 1. */
	private static void fillWithOnes(final Matrix m) {
		for (int i = 0; i < m.rowSize(); i++) {
			for (int j = 0; j < m.columnSize(); j++) {
				m.setQuick(i, j, 1.0);
			}
		}
	}
}
