package com.anji.hyperneat.onlinereinforcement;

import com.anji.hyperneat.nd.NDFloatArray;
import com.anji.hyperneat.nd.Util;
import com.anji.hyperneat.nd.NDFloatArray.MatrixIterator;

/**
 * Online-learning activator that applies the ABC-model Hebbian plasticity rule
 * to a {@link GridNetHebbianABC} phenotype. After each activation cycle the
 * per-connection weight change is
 * {@code delta_w = n * (A * input * output + B * input + C * output)},
 * where n, A, B and C are per-connection learning rates evolved into the
 * phenotype. A reward signal extracted from the desired-output array controls
 * the sign of the update.
 */
public class ActivatorNDABCHebbian extends OnlineLearningActivatorNDBase {

	/**
	 * @param phenotype the Hebbian-ABC grid network this activator drives and
	 *        trains online.
	 */
	public ActivatorNDABCHebbian(GridNetHebbianABC phenotype) {
		super(phenotype);
	}

	/**
	 * Applies the Hebbian-ABC update with a learning-rate modifier of 1.
	 *
	 * @param desiredOuts array scanned for a scalar reward signal; see
	 *        {@link #updateNet(NDFloatArray, float)}.
	 */
	@Override
	public void updateNet(NDFloatArray desiredOuts) {
		updateNet(desiredOuts, 1.0f);
	}

	/**
	 * Extracts a reward signal from {@code desiredOuts} — the first non-zero
	 * element, or 0 if all elements are zero — then applies the Hebbian-ABC
	 * rule to the incoming weights of every non-input layer.
	 *
	 * @param desiredOuts array whose first non-zero element is treated as the
	 *        scalar reward; only its sign is used by the update (a negative
	 *        reward reverses the direction of the weight change).
	 * @param learningRateModifier multiplier applied to all four per-connection
	 *        learning rates (n, A, B, C) for this update.
	 */
	public void updateNet(NDFloatArray desiredOuts, float learningRateModifier) {
		// Reward is the first non-zero entry of desiredOuts (0 if none).
		float reward = 0;
		for (MatrixIterator desiredOut = desiredOuts.iterator(); desiredOut.hasNext(); desiredOut.next()) {
			float val = desiredOut.get();
			if (val != 0.0f) {
				reward = val;
				break;
			}
		}
		// Layer 0 is the input layer; only layers with incoming weights are updated.
		for (int l = 1; l < working.getNumLayers(); l++) {
			applyHebbianRule(l, reward, learningRateModifier);
		}
	}

	/**
	 * Applies the ABC Hebbian rule to the weight matrix feeding layer
	 * {@code l}, then decays the network's learning rates.
	 *
	 * @param l index of the target layer (1-based; layer l-1 is the source).
	 * @param reward scalar reward; a negative value negates every weight delta.
	 * @param modifier multiplier applied to the n, A, B and C learning rates.
	 */
	private void applyHebbianRule(int l, float reward, float modifier) {
		GridNetHebbianABC hebNet = (GridNetHebbianABC) working;
		// delta_w = n * (A * input * output + B * input + C * output)
		NDFloatArray weights = hebNet.getWeights()[l - 1];

		for (MatrixIterator w = weights.iterator(); w.hasNext(); w.next()) {
			// Weight coordinates encode both endpoints; split them apart.
			int[] weightCoords = w.getCurrentCoordinates();
			int[] srcCoords = Util.getSrcCoordsFromCoordsSet(weightCoords);
			int[] tgtCoords = Util.getTgtCoordsFromCoordsSet(weightCoords);
			float nLearningRate = hebNet.getNLearningRate(l - 1, weightCoords) * modifier;
			float aLearningRate = hebNet.getALearningRate(l - 1, weightCoords) * modifier;
			float bLearningRate = hebNet.getBLearningRate(l - 1, weightCoords) * modifier;
			float cLearningRate = hebNet.getCLearningRate(l - 1, weightCoords) * modifier;
			float inputVal = hebNet.getLayers()[l - 1].get(srcCoords);
			float outputVal = hebNet.getLayers()[l].get(tgtCoords);

			float delta = nLearningRate
					* (aLearningRate * inputVal * outputVal + bLearningRate * inputVal + cLearningRate * outputVal);
			// A negative reward reverses the direction of the weight change.
			if (reward < 0) {
				delta = -delta;
			}
			w.set(w.get() + delta);
		}

		hebNet.decayLearningRates();
	}

}
