package pl.edu.agh.nn.backpropagation;

import pl.edu.agh.nn.net.AbstractNetWithLayers;

/**
 * Feed-forward network trained with the classic backpropagation rule.
 * A single {@link #learn} call performs one forward pass, computes the
 * per-neuron error terms from output layer back to the first inner layer,
 * and then applies all bias/weight deltas at once.
 */
public class BackPropagationNet extends AbstractNetWithLayers<BackPropagationLayer> {

	/** Remaining learning iterations; counted down by {@link #isLearningFinished()}. */
	private int stepsCount;

	/**
	 * Sets how many learning steps remain before {@link #isLearningFinished()}
	 * reports completion. Without this the field stays at its default of 0 and
	 * the countdown cannot be configured by callers.
	 *
	 * @param stepsCount number of remaining learning iterations
	 */
	public void setStepsCount(int stepsCount) {
		this.stepsCount = stepsCount;
	}

	/**
	 * Performs one backpropagation training step.
	 *
	 * @param args     network input vector
	 * @param expected desired output vector, same length as the output layer
	 * @return the network's output for {@code args} computed before the weight update
	 */
	@Override
	public double[] learn(double[] args, double[] expected) {

		// Forward pass; the returned vector is also what this method returns.
		double[] computed = compute(args);

		int layerCount = innerLayers.size();

		// changes[l][n][s] = weight delta for synapse s of neuron n in layer l.
		// Index layerCount holds the output layer; 0..layerCount-1 the inner layers.
		double[][][] changes = new double[layerCount + 1][][];
		// dfis[l][n] = learning-rate-scaled error term, also used as the bias delta.
		double[][] dfis = new double[layerCount + 1][];

		// --- Output layer: error = f'(output) * (expected - output). ---
		int outputCount = outputLayer.getNeurons().size();
		changes[layerCount] = new double[outputCount][];
		dfis[layerCount] = new double[outputCount];
		double[] lastErrors = new double[outputCount];
		for (int i = 0; i < outputCount; ++i) {
			BackPropagationNeuron neuron = outputLayer.getNeurons().get(i);
			double e = neuron.getActivation().deriveComputation(computed[i]) * (expected[i] - computed[i]);
			lastErrors[i] = e;
			double dfi = neuron.getLearningSpeed() * e;
			dfis[layerCount][i] = dfi;
			changes[layerCount][i] = new double[neuron.getSynapses().size()];
			for (int j = 0; j < changes[layerCount][i].length; ++j) {
				changes[layerCount][i][j] = dfi * neuron.getSynapses().get(j).getFrom().getOutput();
			}
		}

		// --- Inner layers, back to front:
		//     error = f'(output) * sum(downstream error * connecting weight). ---
		BackPropagationLayer lastLayer = outputLayer;
		for (int i = layerCount - 1; i >= 0; --i) {
			BackPropagationLayer innerLayer = innerLayers.get(i);
			int neuronCount = innerLayer.getNeurons().size();
			double[] errors = new double[neuronCount];
			changes[i] = new double[neuronCount][];
			dfis[i] = new double[neuronCount];
			for (int j = 0; j < neuronCount; ++j) {
				BackPropagationNeuron n = innerLayer.getNeurons().get(j);
				// NOTE(review): assumes synapse j of each downstream neuron
				// originates from neuron j of this layer — confirm against the
				// net's wiring in AbstractNetWithLayers.
				double g = 0.0;
				for (int k = 0; k < lastLayer.getNeurons().size(); ++k) {
					g += lastErrors[k] * lastLayer.getNeurons().get(k).getSynapses().get(j).getWeight();
				}
				double e = n.getActivation().deriveComputation(n.getOutput()) * g;
				errors[j] = e;
				double dfi = n.getLearningSpeed() * e;
				dfis[i][j] = dfi;
				changes[i][j] = new double[n.getSynapses().size()];
				for (int k = 0; k < changes[i][j].length; ++k) {
					changes[i][j][k] = dfi * n.getSynapses().get(k).getFrom().getOutput();
				}
			}
			lastErrors = errors;
			lastLayer = innerLayer;
		}

		// --- Apply all deltas only after every error term was computed against
		//     the old (pre-update) weights. ---
		for (int i = 0; i < layerCount; ++i) {
			applyChanges(innerLayers.get(i), dfis[i], changes[i]);
		}
		applyChanges(outputLayer, dfis[layerCount], changes[layerCount]);

		return computed;
	}

	/**
	 * Applies the precomputed bias and synapse-weight deltas to every neuron of
	 * {@code layer}. The bias delta is added only when the layer has a bias
	 * connection; synapse weights change via each neuron's momentum.
	 *
	 * @param layer   layer whose neurons are updated
	 * @param dfis    per-neuron bias deltas
	 * @param changes per-neuron, per-synapse weight deltas
	 */
	private void applyChanges(BackPropagationLayer layer, double[] dfis, double[][] changes) {
		for (int j = 0; j < layer.getNeurons().size(); ++j) {
			BackPropagationNeuron n = layer.getNeurons().get(j);
			if (layer.isBiasConnected()) {
				n.setBias(n.getBias() + dfis[j]);
			}
			for (int k = 0; k < n.getSynapses().size(); ++k) {
				n.getSynapses().get(k).changeWeight(changes[j][k], n.getMomentum());
			}
		}
	}

	@Override
	public boolean isLearningFinished() {
		// Post-decrement: reports true only once stepsCount has dropped below
		// zero, so exactly stepsCount + 1 "not finished" answers are given.
		return stepsCount-- < 0;
	}

}
