package com.anji.hyperneat.onlinereinforcement;

import com.anji.hyperneat.nd.NDFloatArray;
import com.anji.hyperneat.nd.NDFloatArray.MatrixIterator;
import com.anji.hyperneat.nd.Util;

/**
 * Online-learning activator that adjusts the weights of an N-dimensional
 * network using a reward-modulated basic Hebbian rule:
 * {@code delta_w = learningRate * input * output}, with the sign of the
 * update flipped when the reward signal is negative.
 */
public class ActivatorNDHebbian extends OnlineLearningActivatorNDBase {

	/**
	 * @param phenotype the learning-rate-aware activator whose working network
	 *                  this object trains in place
	 */
	public ActivatorNDHebbian(ActivatorNDLR phenotype) {
		super(phenotype);
	}

	/**
	 * Applies one Hebbian update pass with a learning-rate modifier of 1.
	 *
	 * @param desiredOuts array scanned for the reward signal; see
	 *                    {@link #updateNet(NDFloatArray, float)}
	 */
	@Override
	public void updateNet(NDFloatArray desiredOuts) {
		updateNet(desiredOuts, 1.0f);
	}

	/**
	 * Applies one Hebbian update pass to every weight layer of the working
	 * network. The reward is taken to be the first non-zero element found in
	 * {@code desiredOuts} (0 if all elements are zero); only its sign affects
	 * the update direction.
	 *
	 * @param desiredOuts array whose first non-zero element is used as the
	 *                    reward signal
	 * @param modifier    scale factor applied to each weight's learning rate
	 */
	public void updateNet(NDFloatArray desiredOuts, float modifier) {
		// Extract the reward: first non-zero value in desiredOuts, else 0.
		float reward = 0;
		for (MatrixIterator desiredOut = desiredOuts.iterator(); desiredOut.hasNext(); desiredOut.next()) {
			float val = desiredOut.get();
			if (val != 0.0f) {
				reward = val;
				break;
			}
		}
		// Layer 0 is the input layer; weight array l-1 feeds layer l.
		for (int l = 1; l < working.getNumLayers(); l++) {
			applyHebbianRule(l, reward, modifier);
		}
	}

	/**
	 * Applies the basic Hebbian rule ({@code delta_w = learningRate * input *
	 * output}) to every weight feeding layer {@code l}, negating the delta
	 * when the reward is negative, then decays the learning rates.
	 *
	 * @param l        index of the target layer (source layer is {@code l-1})
	 * @param reward   reward signal; only its sign is used here
	 * @param modifier scale factor applied to each weight's learning rate
	 */
	private void applyHebbianRule(int l, float reward, float modifier) {
		NDFloatArray weights = working.getWeights()[l - 1];

		for (MatrixIterator w = weights.iterator(); w.hasNext(); w.next()) {
			// A weight's coordinate set encodes both its source and target
			// neuron coordinates; Util splits them apart.
			int[] weightCoords = w.getCurrentCoordinates();
			int[] srcCoords = Util.getSrcCoordsFromCoordsSet(weightCoords);
			int[] tgtCoords = Util.getTgtCoordsFromCoordsSet(weightCoords);
			float learningRate = working.getWeightLearningRate(l - 1, weightCoords) * modifier;
			float inputVal = working.getLayers()[l - 1].get(srcCoords);
			float outputVal = working.getLayers()[l].get(tgtCoords);

			// Basic Hebbian update; punishment (negative reward) reverses it.
			float delta = learningRate * inputVal * outputVal;
			if (reward < 0) {
				delta = -delta;
			}
			w.set(w.get() + delta);
		}

		// NOTE(review): this runs once per layer, so one updateNet() call
		// decays the learning rates (numLayers - 1) times. Preserved as-is,
		// but confirm whether a single decay per update was intended.
		working.decayLearningRates();
	}

}
