package com.anji.hyperneat.onlinereinforcement;

import com.anji.hyperneat.nd.NDFloatArray;
import com.anji.hyperneat.nd.NDFloatArray.MatrixIterator;
import com.anji.hyperneat.nd.Util;
import com.anji.hyperneat.onlinereinforcement.ActivatorNDLR;
import com.anji.hyperneat.onlinereinforcement.IOnlineLearningActivatorND;

/**
 * Online-learning activator that adjusts the working network's weights and
 * biases via standard backpropagation of error (delta rule), assuming a
 * logistic (sigmoid) activation function — the derivative term used
 * throughout is {@code val * (1 - val)}.
 *
 * <p>Delta arrays are indexed so that {@code d[layer]} holds the error terms
 * for the targets of weight layer {@code layer}, i.e. for network layer
 * {@code layer + 1}; {@code d[d.length - 1]} is the output layer.</p>
 */
public class ActivatorNDBackPropagator extends OnlineLearningActivatorNDBase {

	/**
	 * @param phenotype the network phenotype to train.
	 */
	public ActivatorNDBackPropagator(ActivatorNDLR phenotype) {
		super(phenotype);
	}


	/**
	 * Performs one backpropagation update toward the given target outputs,
	 * with an unmodified (1.0) learning rate.
	 *
	 * @param desiredOuts target output values, same dimensions as the network's
	 *        output layer.
	 */
	@Override
	public void updateNet(NDFloatArray desiredOuts) {
		performBackPropagation(desiredOuts, 1.0f);
	}

	/**
	 * Performs one backpropagation update toward the given target outputs.
	 *
	 * @param desiredOuts target output values, same dimensions as the network's
	 *        output layer.
	 * @param learningRateModifier multiplier applied to each connection's and
	 *        bias's base learning rate for this update.
	 */
	@Override
	public void updateNet(NDFloatArray desiredOuts, float learningRateModifier) {
		performBackPropagation(desiredOuts, learningRateModifier);
	}

	/**
	 * Computes error deltas for every layer and applies the resulting weight
	 * and bias changes in place, then decays the per-connection learning rates.
	 *
	 * @param desiredOuts target output values.
	 * @param learningRateModifier multiplier applied to the base learning rates
	 *        for this update.
	 */
	protected void performBackPropagation(NDFloatArray desiredOuts, float learningRateModifier) {
		int[] coords;

		/* Compute discrepancy error for each layer, starting with output layer
		 * and progressing backwards to first layer.
		 */
		NDFloatArray[] d = calcDeltas(desiredOuts);

		// Now perform updates, one weight layer at a time.
		for (int layer = d.length - 1; layer >= 0; layer--) {
			// Weights: dw = eta * activation(src) * delta(tgt)
			for (MatrixIterator it = working.getWeights()[layer].iterator();
				it.hasNext();
				it.next()
			) {
				coords = it.getCurrentCoordinates();
				// Weight coordinates encode both endpoints; split them apart.
				int[] tgtcoords = Util.getTgtCoordsFromCoordsSet(coords);
				int[] srccoords = Util.getSrcCoordsFromCoordsSet(coords);
				float learningRate = working.getWeightLearningRate(layer, coords) * learningRateModifier;
				float activation = working.getLayer(layer).get(srccoords);
				float weightChange = learningRate * activation * d[layer].get(tgtcoords);
				it.set(it.get() + weightChange);
			}

			// Bias: db = eta * delta(tgt). Bias input is implicitly 1.
			for (MatrixIterator it = working.getBias()[layer].iterator();
				it.hasNext();
				it.next()
			) {
				coords = it.getCurrentCoordinates();
				// Fix: apply learningRateModifier here too, so biases are scaled
				// consistently with the weight updates above (previously the
				// modifier was silently ignored for biases).
				float learningRate = working.getBiasLearningRate(layer, coords) * learningRateModifier;
				float biasChange = learningRate * d[layer].get(coords);
				it.set(it.get() + biasChange);
			}
		}

		working.decayLearningRates();
	}


	/**
	 * Computes the per-neuron error terms (deltas) for every trainable layer,
	 * starting from the output layer and propagating backwards.
	 *
	 * @param desiredOuts target output values, same dimensions as the output
	 *        layer.
	 * @return array of delta matrices; {@code d[i]} holds the deltas for the
	 *         target neurons of weight layer {@code i}.
	 */
	protected NDFloatArray[] calcDeltas(NDFloatArray desiredOuts) {
		NDFloatArray[] d = new NDFloatArray[working.getNumLayers() - 1];
		int[] coords;
		float expected;
		float discrepancyErr;

		// Output layer first: delta = f'(out) * (expected - out),
		// with f'(out) = out * (1 - out) for the sigmoid.
		int layer = d.length - 1;
		d[layer] = new NDFloatArray(working.getOutputDimension());
		for (MatrixIterator it = working.getOutputs().iterator();
			it.hasNext();
			it.next()
		) {
			coords = it.getCurrentCoordinates();
			expected = desiredOuts.get(coords);
			// Nudge exact 0/1 targets inward: a sigmoid can never reach them,
			// so chasing exact extremes drives weights toward infinity.
			if (expected == 0.0f) expected = 0.0001f;
			if (expected == 1.0f) expected = 0.9999f;
			float val = it.get();
			discrepancyErr = val * (1 - val) * (expected - val);
			d[layer].set(discrepancyErr, coords);
		}

		// Hidden layers: delta = f'(act) * sum over targets of (w * delta(tgt)).
		for (layer = d.length - 1; layer > 0; layer--) {
			d[layer - 1] = new NDFloatArray(working.getLayer(layer).getDimensions());
			for (MatrixIterator it = working.getLayer(layer).iterator();
				it.hasNext();
				it.next()
			) {
				coords = it.getCurrentCoordinates();
				float val = it.get();
				discrepancyErr = val * (1 - val) * calcWeightErrSum(coords, working.getWeights()[layer], d[layer]);
				d[layer - 1].set(discrepancyErr, coords);
			}
		}

		return d;
	}


	/**
	 * Sums, over all target neurons, the product of the connecting weight and
	 * the target's delta — the backpropagated error reaching the source neuron.
	 *
	 * @param srcCoords coordinates of the source neuron.
	 * @param wgts weight matrix connecting the source layer to the target layer.
	 * @param discrepancyErrs deltas of the target layer.
	 * @return weighted error sum for the source neuron.
	 */
	private float calcWeightErrSum(int[] srcCoords, NDFloatArray wgts, NDFloatArray discrepancyErrs) {
		float sum = 0;
		for (MatrixIterator it = discrepancyErrs.iterator();
			it.hasNext();
			it.next()
		) {
			int[] tgtCoords = it.getCurrentCoordinates();
			sum += wgts.get(tgtCoords, srcCoords) * it.get();
		}

		return sum;
	}

	/**
	 * Unused auto-generated stub; retained for binary compatibility.
	 *
	 * @param args ignored.
	 */
	public static void main(String[] args) {
		// Intentionally empty.
	}

}
