package com.anji.hyperneat.onlinereinforcement;

import com.anji.hyperneat.nd.NDFloatArray;
import com.anji.hyperneat.nd.Util;
import com.anji.hyperneat.nd.NDFloatArray.MatrixIterator;
import com.anji.hyperneat.onlinereinforcement.ActivatorNDLR.LearningRateGranularity;

/**
 * See http://www.stanford.edu/group/pdplab/pdphandbook/handbookch10.html
 * and http://webdocs.cs.ualberta.ca/~sutton/book/ebook/node87.html
 * for details about the algorithm and its gradient descent version.
 * Note that this class is tightly coupled with GridNetTD
 * @author slusk
 *
 */
public class ActivatorNDTemporalDifference extends OnlineLearningActivatorNDBase {

	/* For easy access and speed, cache a link to the GridNetTD used to create 
	 * this activator, instead of casting every time  
	 */
	protected GridNetTD workingTd;
	
	// The gradients calculated from the previous timestep, required for the TD update
	protected NDFloatArray lastGradients[];
	// Gradients of each output w.r.t. each weight, for the current timestep (overwritten in place)
	protected NDFloatArray gradients[];
	// Delta (error) terms per layer, pre-allocated in the constructor and reused
	protected NDFloatArray[] deltas;
	// Outputs from the most recent activation
	protected NDFloatArray outputs;	
	// Outputs from the activation before the most recent one (needed for the TD error)
	protected NDFloatArray lastOutputs;
	// Number of layers in the working net, cached for loop bounds
	protected int depth;
	
	// Eligibility traces, dimension of weights * outputs 
	protected NDFloatArray[] eTraces;
	
	// Used only by the main() test harness to alternate starting positions
	protected static boolean testFlag;
	
	/**
	 * Creates a TD activator over the given phenotype and pre-allocates the
	 * delta and gradient buffers for every weight layer; they are reused in
	 * place on each activation rather than re-allocated.
	 * @param phenotype the GridNetTD this activator drives; also cached in
	 *        {@link #workingTd} to avoid repeated casts of {@code working}
	 */
	public ActivatorNDTemporalDifference(GridNetTD phenotype) {
		super(phenotype);
		workingTd = (GridNetTD) working;
		// Eligibility trace arrays are allocated lazily in calcETraces, once
		// lastGradients (whose dimensions they share) is available.
		eTraces = new NDFloatArray[working.getNumLayers()-1];
//		for (int layer = 0; layer < working.getNumLayers()-1; layer++) {
//			eTraces[layer] = new NDFloatArray(lastGradients[layer].getDimensions());
//			eTraces[layer].clear();
//		}
		outputs = null;
		
		depth = working.getNumLayers();
		deltas = new NDFloatArray[depth - 1];	// don't need deltas for input layer
		gradients = new NDFloatArray[depth - 1];
		for (int layer = depth - 1; layer > 0; layer--) {	// start with the last layer, work backward
			// deltas is one less than depth, offset one back from the current layer
			
			if (layer == depth - 1) {
				// If it's the output layer just use the output layer dimensions
				deltas[layer-1] = new NDFloatArray(working.getOutputDimension());	 	
				gradients[layer-1] = new NDFloatArray(deltas[layer-1].getDimensions(), working.getLayer(layer-1).getDimensions());
			} else {
				/* Otherwise, it's the dimensions of the output layer * the dimensions of the current layer.
				 * The reason is, that you calculate the delta for the previous layers by
				 * calculating the delta for each pair of output layer nodes and current layer nodes.
				 * Look at calcDeltaSum, it will make more sense.
				 */	
				deltas[layer-1] = new NDFloatArray(working.getOutputDimension(), working.getLayer(layer).getDimensions());
				gradients[layer-1] = new NDFloatArray(deltas[layer-1].getDimensions(), working.getLayer(layer-1).getDimensions());
			}
		}
	}
	
	/**
	 * Applies one TD weight update with no extra learning-rate scaling.
	 * Delegates to {@link #updateNet(NDFloatArray, float)} with a neutral
	 * (1.0) modifier.
	 * @param rewards reward signal, one value per output node
	 */
	@Override
	public void updateNet(NDFloatArray rewards) {
		final float neutralModifier = 1.0f;
		this.updateNet(rewards, neutralModifier);
	}
	
	/**
	 * Applies one TD(lambda) weight update.  Computes the TD errors from the
	 * rewards and the two most recent activations, refreshes each layer's
	 * eligibility traces, then adjusts every weight by
	 * alpha * learningRateModifier * sum(tdErr * eTrace).
	 * Does nothing until at least two activations have occurred
	 * (lastGradients != null), but always decays the learning rates.
	 * @param rewards reward signal, one value per output node
	 * @param learningRateModifier extra scale factor applied on top of alpha
	 */
	@Override
	public void updateNet(NDFloatArray rewards, float learningRateModifier) {
		// deltas[] = method:CalcDeltas()
		// gradients[] = method:CalcGradients( deltas )
//		NDFloatArray deltas[] = calcDeltas();
//		NDFloatArray gradients[] = calcGradients( deltas );
		
		if (null != lastGradients) {
//			calcETraces();
			
			NDFloatArray tdErrs = calcTDErrs( rewards );
			
			for (int layer = 0; layer < working.getNumLayers()-1; layer++) {
				calcETraces(layer);
				for (MatrixIterator w = working.getWeights()[layer].iterator()
						; w.hasNext()
						; w.next() ) {
					int[] wCoords = w.getCurrentCoordinates();
					// Weight change = learning rate * modifier * (TD error x eligibility trace)
					float newVal = workingTd.getAlpha(layer, wCoords) * learningRateModifier * sumTdErrAndETraces(layer, tdErrs, wCoords);
					//if (newVal > 0) System.out.println("change=" + newVal);
					w.set(w.get() + newVal);
				}
			}
		}
		
//		lastGradients = gradients;
		workingTd.decayLearningRates();
	}
	
	/**
	 * No-op: all buffers are allocated in the constructor and the eligibility
	 * traces are created lazily in calcETraces, so there is nothing to do here.
	 */
	public void init() {

	}

	/**
	 * Activates the network and maintains the one-step history (lastOutputs,
	 * lastGradients) that the TD update in {@link #updateNet} relies on.
	 * <p>
	 * Fix: the previous implementation did {@code lastGradients = gradients},
	 * which only aliased the array references; the subsequent
	 * {@link #calcGradients()} call then overwrote those same NDFloatArrays in
	 * place, so "lastGradients" actually held the CURRENT timestep's gradients.
	 * The gradients are now snapshotted by value before being recomputed,
	 * mirroring the clone/copyTo handling of lastOutputs above.
	 * @param stimuli input activations
	 * @return the network outputs for these stimuli
	 */
	@Override
	public NDFloatArray next(NDFloatArray stimuli) {
		if (outputs != null) {
			if (null == lastOutputs) lastOutputs = outputs.clone();
			else outputs.copyTo(lastOutputs);
		}
		outputs = super.next(stimuli);
		
		if (lastOutputs != null) {
			// Snapshot the previous timestep's gradients by value;
			// calcGradients() below overwrites the gradient buffers in place.
			if (null == lastGradients) {
				lastGradients = new NDFloatArray[gradients.length];
				for (int layer = 0; layer < gradients.length; layer++)
					lastGradients[layer] = gradients[layer].clone();
			} else {
				for (int layer = 0; layer < gradients.length; layer++)
					gradients[layer].copyTo(lastGradients[layer]);
			}
		}
//		NDFloatArray deltas[] = calcDeltas();
//		gradients = calcGradientsOld( deltas );
		calcGradients();
		
		return outputs;
	}
	
	/**
	 * Activates the network on each stimulus in turn so that gradients and
	 * output history are maintained for every step of the sequence.
	 * <p>
	 * Fix: guard the final lastOutputs assignment so that a sequence with
	 * fewer than two stimuli no longer throws
	 * ArrayIndexOutOfBoundsException on {@code outs[stimuli.length-2]}.
	 * @param stimuli the input sequence
	 * @return the outputs, one per stimulus, in order
	 */
	@Override
	public NDFloatArray[] nextSequence(NDFloatArray[] stimuli) {
		NDFloatArray[] outs = new NDFloatArray[stimuli.length];
		// Do it this way to ensure gradient is calculated properly.  This needs to be optimized, since you could just calc gradients on the last go
		for (int i = 0; i < stimuli.length; i++)
			outs[i] = next(stimuli[i]);
		
		// next() already tracks lastOutputs; only re-point it when a
		// next-to-last element actually exists.
		if (stimuli.length >= 2)
			lastOutputs = outs[stimuli.length-2];
		
		return outs;
	}
	
	/**
	 * Calculate the delta terms for gradient descent for each layer;
	 * for Temporal Difference, use gradient of output with respect to weights
	 * (logistic derivative out * (1 - out), back-propagated through the net).
	 * Unlike {@link #calcGradients()} this allocates fresh arrays on every
	 * call; retained as the older allocate-per-step implementation.
	 * @return The NDFloatArray's of delta terms, one array per non-input layer.
	 */
	protected NDFloatArray[] calcDeltas() {
		int depth = working.getNumLayers();
		NDFloatArray[] deltas = new NDFloatArray[depth - 1];	// don't need deltas for input layer
		
		for (int layer = depth - 1; layer > 0; layer--) {	// start with the last layer, work backward
			// deltas is one less than depth, offset one back from the current layer
			
			if (layer == depth - 1) {
				deltas[layer-1] = new NDFloatArray(working.getOutputDimension());	// If it's the output layer just use the output layer dimensions 	

				for (MatrixIterator delta = deltas[layer-1].iterator(); delta.hasNext(); delta.next()) {
					float deltaValue = working.getLayer(layer).getFromRawCoordinate( delta.getRawCoordinate() );	// For output deltas, use complete set of coords
					// Logistic (sigmoid) derivative of the output activation
					deltaValue = deltaValue * (1 - deltaValue);
					delta.set(deltaValue);
				}
			} else {
				/* Otherwise, it's the dimensions of the output layer * the dimensions of the current layer.
				 * The reason is, that you calculate the delta for the previous layers by
				 * calculating the delta for each pair of output layer nodes and current layer nodes.
				 * Look at calcDeltaSum, it will make more sense.
				 */	
				deltas[layer-1] = new NDFloatArray(working.getOutputDimension(), working.getLayer(layer).getDimensions());
					
				for (MatrixIterator delta = deltas[layer-1].iterator(); delta.hasNext(); delta.next()) {
					/* for non-output deltas, extract the source layer coordinates from the delta to get the node for the "output" calculation
					 * 
					 */
					int[] deltaCoords = delta.getCurrentCoordinates();
					int[] srcCoords = Util.getSrcCoordsFromCoordsSet(deltaCoords);
					int[] tgtCoords = Util.getTgtCoordsFromCoordsSet(deltaCoords);
					float deltaValue = working.getLayer(layer).get(srcCoords);	 
//					deltaValue = (deltaValue * (1 - deltaValue)) * calcDeltaSum(layer, deltas, delta);
					deltaValue = (deltaValue * (1 - deltaValue));
					// This will only work for 3 layer nets.  But who cares.
					// (Uses a single downstream delta/weight pair instead of summing over all of them.)
					deltaValue = deltaValue * deltas[layer].get(tgtCoords) * working.getWeights()[layer].get(tgtCoords, srcCoords);
					delta.set(deltaValue);
				}
			}
		}
		
		return deltas;
	}
	
	/**
	 * Calculate the deltas and the gradients of the outputs with respect to
	 * the weights, writing into the pre-allocated {@link #deltas} and
	 * {@link #gradients} buffers (overwritten in place on every activation).
	 * Deltas use the logistic derivative out * (1 - out); each gradient entry
	 * is delta * incoming activation.
	 * NOTE(review): the non-output branch multiplies by a single downstream
	 * delta/weight pair rather than summing over all of them, which (per the
	 * original comment) only holds for 3-layer nets.
	 */
	protected void calcGradients() {	
		for (int layer = depth - 1; layer > 0; layer--) {	// start with the last layer, work backward
			// deltas is one less than depth, offset one back from the current layer
			
			if (layer == depth - 1) {
	
				for (MatrixIterator delta = deltas[layer-1].iterator(); delta.hasNext(); delta.next()) {
					int deltaRawCoord = delta.getRawCoordinate();
					int[] deltaCoords = delta.getCurrentCoordinates();
					float deltaValue = working.getLayer(layer).getFromRawCoordinate( deltaRawCoord );	// For output deltas, use complete set of coords
					// Logistic (sigmoid) derivative of the output activation
					deltaValue = deltaValue * (1 - deltaValue);
					delta.set(deltaValue);
					// gradient entry = delta * incoming activation, indexed by (delta coords, activation coords)
					for (MatrixIterator incomingActivation = working.getLayer(layer - 1).iterator()
							; incomingActivation.hasNext()
							; incomingActivation.next()) {
						float value = deltaValue * incomingActivation.get();
						gradients[layer-1].set(value, Util.joinCoords(deltaCoords, incomingActivation.getCurrentCoordinates()));
					}
				}
			} else {
					
				for (MatrixIterator delta = deltas[layer-1].iterator(); delta.hasNext(); delta.next()) {
					/* for non-output deltas, extract the source layer coordinates from the delta to get the node for the "output" calculation
					 * 
					 */
					int[] deltaCoords = delta.getCurrentCoordinates();
					int[] srcCoords = Util.getSrcCoordsFromCoordsSet(deltaCoords);
					int[] tgtCoords = Util.getTgtCoordsFromCoordsSet(deltaCoords);
					float deltaValue = working.getLayer(layer).get(srcCoords);	 
//					deltaValue = (deltaValue * (1 - deltaValue)) * calcDeltaSum(layer, deltas, delta);
					deltaValue = (deltaValue * (1 - deltaValue));
					// This will only work for 3 layer nets.  But who cares.
					deltaValue = deltaValue * deltas[layer].get(tgtCoords) * working.getWeights()[layer].get(tgtCoords, srcCoords);
					delta.set(deltaValue);
					
					for (MatrixIterator incomingActivation = working.getLayer(layer-1).iterator()
							; incomingActivation.hasNext()
							; incomingActivation.next()) {
						float value = deltaValue * incomingActivation.get();
						gradients[layer-1].set(value, Util.joinCoords(deltaCoords, incomingActivation.getCurrentCoordinates()));
					}
				}
			}
		}
	}


	/**
	 * Legacy gradient computation: builds fresh gradient arrays from the
	 * supplied delta terms instead of reusing the cached buffers.  Each
	 * gradient entry is delta * incoming activation, indexed by the joined
	 * (delta coords, activation coords).
	 * @param deltas delta terms per non-input layer, as from calcDeltas()
	 * @return newly allocated gradient arrays, one per weight layer
	 */
	protected NDFloatArray[] calcGradientsOld(NDFloatArray[] deltas) {
		final int layerCount = working.getNumLayers();
		NDFloatArray[] result = new NDFloatArray[layerCount - 1];
		
		for (int lyr = 0; lyr < layerCount - 1; lyr++) {
			// deltas index 0..depth-2 corresponds to layers 1..depth-1; the
			// incoming activations come from the numerically matching lower layer.
			result[lyr] = new NDFloatArray(
					deltas[lyr].getDimensions()
					, working.getLayer(lyr).getDimensions());
			
			MatrixIterator d = deltas[lyr].iterator();
			while (d.hasNext()) {
				int[] dCoords = d.getCurrentCoordinates();
				MatrixIterator act = working.getLayer(lyr).iterator();
				while (act.hasNext()) {
					result[lyr].set(d.get() * act.get(), Util.joinCoords(dCoords, act.getCurrentCoordinates()));
					act.next();
				}
				d.next();
			}
		}		
		return result;
	}

	/**
	 * Calculate the temporal difference error for each output node:
	 * tdErr = reward + gamma * V(current) - V(previous), where the V values
	 * are the current and previous output activations.
	 * @param rewards reward per output node, same dimensions as the output layer
	 * @return the TD errors, same dimensions as the output layer
	 */
	protected NDFloatArray calcTDErrs(NDFloatArray rewards) {
		NDFloatArray tdErrs = new NDFloatArray(working.getOutputs().getDimensions());
		
		for (MatrixIterator tdErr = tdErrs.iterator(); tdErr.hasNext(); tdErr.next()) {
			int[] tdErrCoords = tdErr.getCurrentCoordinates();
			int tdRawCoord = tdErr.getRawCoordinate();
			float value = rewards.get(tdErrCoords)
				// gammas should have one additional layer beyond weights, for the output
				+ workingTd.getOutputGamma(tdErrCoords) 
				* working.getOutputs().getFromRawCoordinate(tdRawCoord)
				- lastOutputs.getFromRawCoordinate(tdRawCoord);
			
			tdErr.set(value);
		}
		
		return tdErrs;
	}

	/**
	 * Updates the eligibility traces for every weight layer.
	 * Fix: the body previously duplicated the per-layer logic of
	 * {@link #calcETraces(int)} verbatim; it now delegates to that method so
	 * the trace-update rule lives in exactly one place.
	 */
	protected void calcETraces() {
		for (int layer = 0; layer < working.getNumLayers()-1; layer++) {
			calcETraces(layer);
		}
	}

	/**
	 * Updates the eligibility traces for one weight layer using the standard
	 * accumulating-trace rule e = gamma * lambda * e + gradient, where the
	 * gradients are those saved from the previous timestep.  The trace array
	 * for the layer is created (and zeroed) on first use.
	 * @param layer weight layer index, 0 .. numLayers-2
	 */
	protected void calcETraces(int layer) {
//		if (null == eTraces) eTraces = new NDFloatArray[working.getNumLayers()-1];
		
//		for (int layer = 0; layer < working.getNumLayers()-1; layer++) {
			if (null == eTraces[layer]) {
				eTraces[layer] = new NDFloatArray(lastGradients[layer].getDimensions());
				eTraces[layer].clear();
			}
			
			for (MatrixIterator gradient = lastGradients[layer].iterator(); gradient.hasNext(); gradient.next()) {
				int[] gradientCoords = gradient.getCurrentCoordinates();
				int[] wgtCoords = getWgtCoordsFromGradient(gradientCoords, layer);
				// Accumulating trace: decay by gamma*lambda, then add the gradient
				float value = workingTd.getGamma(layer, wgtCoords) * workingTd.getLambda(layer, wgtCoords) * eTraces[layer].get(gradientCoords) + gradient.get();
				eTraces[layer].set(value, gradientCoords);
			}
//		}
	}
	
	/**
	 * Maps a gradient entry's coordinates to the coordinates of the weight it
	 * belongs to.
	 * @param gradientCoords coordinates of an entry in a gradient array
	 * @param layer the weight layer the gradient array belongs to
	 * @return the coordinates of the corresponding weight
	 */
	protected int[] getWgtCoordsFromGradient(int[] gradientCoords, int layer) {
		/* For the top set of weights (between the output layer and the one
		 * below it) the gradient coordinates ARE the weight coordinates.
		 */
		boolean topWeightLayer = (layer == working.getNumLayers() - 2);
		if (!topWeightLayer) {
			// Lower layers: strip the leading output-node coordinates to
			// recover the weight coordinates.
			return Util.getCoordSubset(gradientCoords, working.getOutputDimension().length, gradientCoords.length - 1);
		}
		return gradientCoords;
	}


	/**
	 * Combines the TD errors with the eligibility traces for a single weight.
	 * For the top weight layer each weight feeds exactly one output node, so a
	 * single tdErr * eTrace product is returned; for lower layers the products
	 * are summed over all output nodes.
	 * @param layer weight layer index
	 * @param tdErrs the TD errors, one per output node
	 * @param currentCoordinates coordinates of the weight being updated
	 * @return the summed tdErr * eligibility-trace contribution for this weight
	 */
	protected float sumTdErrAndETraces(int layer, NDFloatArray tdErrs, int[] currentCoordinates) {
		float sum = 0;
		// For output layer, etraces are 1:1 with weights, ie, the weight is only associated with one tdErr.
		if (layer == working.getNumLayers()-2) {
			sum = tdErrs.get(Util.getTgtCoordsFromCoordsSet(currentCoordinates)) * eTraces[layer].get(currentCoordinates);
		} else {
			// Other wise, sum the tdErr and eligibility-trace products over all outputs
			for (MatrixIterator tdErr = tdErrs.iterator(); tdErr.hasNext(); tdErr.next()) {
				sum += tdErr.get() * eTraces[layer].get(tdErr.getCurrentCoordinates(), currentCoordinates);
			}
			
		}
		
		return sum;
	}

	
	/**
	 * Stand-alone smoke test: a 1-D "walk" task on a 3-layer net (7-7-2).
	 * The agent starts adjacent to the track midpoint and must reach either
	 * end; output 0 means move left, output 1 means move right.  Rewards
	 * push it away from the centre and toward the ends.  Prints per-block
	 * average step counts plus overall best/worst block averages.
	 * @param args unused
	 */
	public static void main(String[] args) {
		int inDim = 7;
		int midDim = 7;
		int outDim = 2;
		int[][] layerDimensions = new int[][] { {inDim}, {midDim}, {outDim} };
		RandomGridNet base = RandomGridNet.getRandomGridNet(3, layerDimensions);

		// SINGLE-granularity TD hyper-parameters: discount gamma, trace decay
		// lambda, learning rate alpha, and per-update alpha decay.
		NDFloatArray[] gamma = new NDFloatArray[1];
		gamma[0] = new NDFloatArray(new int[] {1});
		gamma[0].set(0.3f, 0);

		NDFloatArray[] alpha = new NDFloatArray[1];
		alpha[0] = new NDFloatArray(new int[] {1});
		alpha[0].set(0.5f, 0);

		NDFloatArray[] lambda = new NDFloatArray[1];
		lambda[0] = new NDFloatArray(new int[] {1});
		lambda[0].set(0.7f, 0);

		NDFloatArray[] decay = new NDFloatArray[1];
		decay[0] = new NDFloatArray(new int[] {1});
		decay[0].set(0.000004f, 0);

		GridNetTD net = new GridNetTD(
				base
				, gamma
				, lambda
				, alpha
				, null
				, null
				, decay
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, false
				, false
				, true
		);

		ActivatorNDTemporalDifference activator = new ActivatorNDTemporalDifference(net);

		boolean solved;
		int count, position;
		boolean verbose = true;
		testFlag = false;
		int iterations = 10000;
		int bpoint = 1000;	// iterations per averaging block
		int asize = iterations / bpoint;
		double best = 100;
		double worst = 0;
		double[] avg = new double[asize];	// average step count per block of bpoint iterations
		for (int i = 0; i < asize; i++) avg[i] = 0;

		// Stop early once the best block average drops to 2.25 steps, or the
		// learning rate has decayed to zero.
		for (int i = 0; i < iterations && best > 2.25 &&  ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) > 0 ; i++) {
			count = 0; 
			position = getRandomPosition(activator);
			solved = false;
			
			if (verbose) System.out.println("Iteration " + i + " starting position=" + position);
			
			while (!solved && count < 5 * inDim) {
				count++;
				
				// prepare input: one-hot encoding of the current position
				NDFloatArray in = new NDFloatArray(new int[]{inDim});
				in.clear();
				in.set(1.0f, new int[] {position});
				
				// activate
				NDFloatArray out = activator.next(in);
				
				// process output
				int lastPosition = position;
				
				// greedy move (the epsilon branch below is effectively disabled)
				if (Math.random() <= 0.999999999)
					position += out.getCoordinatesOfMaxValue()[0] == 0 ? -1 : 1;
				
				// epsilon
				else {
					position += (Math.random() > 0.5) ? 1 : -1;
				}
				
				if (verbose) System.out.println("Iteration " + i + ", count=" + count + ", moved to position: " + position);
				
				NDFloatArray reward = new NDFloatArray(new int[] {outDim});
				reward.clear();
				
				// update: +1 for reaching an end, -1 for stepping onto the
				// midpoint, +0.2 for being in the outer thirds of the track
				if (position == 0) {
					solved = true;
					reward.set(1.0f, 0);
//					reward.set(1.0f, 1);
				} else if (position == inDim -1) {
					solved = true;
//					reward.set(1.0f, 0);
					reward.set(1.0f, 1);
				} else if (position == (int) ((double)inDim / 2.0)) {
					if (lastPosition > position) {
						reward.set(-1.0f, 0);
//						reward.set(0.1f, 1);
					} else {
//						reward.set(0.0f, 0);
						reward.set(-1.0f, 1);
					}
				} else if (position > (inDim /2)+1) {
//					reward.set(0.5f, 0);
					reward.set(0.2f, 1);
				} else if (position < (inDim /2)-1) {
					reward.set(0.2f, 0);
//					reward.set(0.5f, 1);
				}
				
				activator.updateNet(reward);
				
			}
			if (solved) {
				System.out.println("Iteration " + i + " solved after: " + count + " ");
			} else {
				System.out.println("Iteration " + i + " not solved after: " + count + " ");
			}
			if (verbose) System.out.println("Gamma " + ((GridNetTD)activator.getWorkingActivator()).getGamma(0, 0) + "\n");
			if (verbose) System.out.println("Alpha " + ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) + "\n");
			
			avg[i/bpoint] += (double)count / bpoint;
			if ((i+1) % bpoint == 0) {
				System.out.println("***** AVG " + avg[i/bpoint] + "\n");
				if (avg[i/bpoint] < best ) {
					best = avg[i/bpoint];
				}
				if (avg[i/bpoint] > worst) {
					worst = avg[i/bpoint];
				}
			}
		}
		System.out.println("Alpha " + ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) + "\n");
		System.out.println("***** BEST " + best + "\n");
		System.out.println("***** WORST " + worst + "\n");
		for (double a : avg) {
			System.out.println(": " + a);
		}

	}

	/**
	 * FOR TESTING ONLY.  Returns a start position one cell to either side of
	 * the track midpoint, alternating sides on successive calls via
	 * {@code testFlag} (despite the name, it is deterministic).
	 * @param activator activator whose input dimension defines the track length
	 * @return the starting position for the next trial
	 */
	private static int getRandomPosition(ActivatorNDTemporalDifference activator) {
		int trackLength = activator.getInputDimension()[0];
		int offset = testFlag ? 1 : -1;
		testFlag = !testFlag;
		return trackLength / 2 + offset;
	}

}


/*
// Left-Right walk test
int inDim = 7;
int midDim = 7;
int outDim = 2;
int[][] layerDimensions = new int[][] { {inDim}, {midDim}, {outDim} };
RandomGridNet base = RandomGridNet.getRandomGridNet(3, layerDimensions);

NDFloatArray[] gamma = new NDFloatArray[1];
gamma[0] = new NDFloatArray(new int[] {1});
gamma[0].set(0.3f, 0);

NDFloatArray[] alpha = new NDFloatArray[1];
alpha[0] = new NDFloatArray(new int[] {1});
alpha[0].set(0.4f, 0);

NDFloatArray[] lambda = new NDFloatArray[1];
lambda[0] = new NDFloatArray(new int[] {1});
lambda[0].set(1.0f, 0);

NDFloatArray[] decay = new NDFloatArray[1];
decay[0] = new NDFloatArray(new int[] {1});
decay[0].set(0.000002f, 0);

GridNetTD net = new GridNetTD(
		base
		, gamma
		, lambda
		, alpha
		, null
		, null
		, decay
		, LearningRateGranularity.SINGLE
		, LearningRateGranularity.SINGLE
		, LearningRateGranularity.SINGLE
		, LearningRateGranularity.SINGLE
		, LearningRateGranularity.SINGLE
		, LearningRateGranularity.SINGLE
		, false
		, false
		, true
);

ActivatorNDTemporalDifference activator = new ActivatorNDTemporalDifference(net);

boolean solved;
int count, position;
boolean verbose = false;
testFlag = false;
int iterations = 100000;
int bpoint = 20000;
int asize = iterations / bpoint;
double best = 100;
double worst = 0;
double[] avg = new double[asize];	// 5 tiers
for (int i = 0; i < asize; i++) avg[i] = 0;

for (int i = 0; i < iterations && best > 2.8 && ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) > 0 ; i++) {
	count = 0; 
	position = getRandomPosition(activator);
	solved = false;
	
	if (verbose) System.out.println("Iteration " + i + " starting position=" + position);
	
	while (!solved && count < 4 * inDim) {
		count++;
		
		// prepare input
		NDFloatArray in = new NDFloatArray(new int[]{inDim});
		in.clear();
		in.set(1.0f, new int[] {position});
		
		// activate
		NDFloatArray out = activator.next(in);
		
		// process output
		int lastPosition = position;
		
		if (Math.random() <= 0.98)
			position += out.getCoordinatesOfMaxValue()[0] == 0 ? -1 : 1;
		
		// epsilon
		else {
			position += (Math.random() > 0.5) ? 1 : -1;
		}
		
		if (verbose) System.out.println("Iteration " + i + ", count=" + count + ", moved to position: " + position);
		
		NDFloatArray reward = new NDFloatArray(new int[] {outDim});
		reward.clear();
		
		// update 
		if (position == 0) {
			solved = true;
			reward.set(1.0f, 0);
//			reward.set(1.0f, 1);
		} else if (position == inDim -1) {
			solved = true;
//			reward.set(1.0f, 0);
			reward.set(1.0f, 1);
		} else if (position == (int) ((double)inDim / 2.0)) {
			if (lastPosition > position)
				reward.set(-1.0f, 0);
			else
				reward.set(-1.0f, 1);
		} else if (position > (inDim /2)+1) {
//			reward.set(0.5f, 0);
//			reward.set(0.5f, 1);
		} else if (position < (inDim /2)-1) {
//			reward.set(0.5f, 0);
//			reward.set(0.5f, 1);
		}
		
		activator.updateNet(reward);
		
	}
	if (solved) {
		System.out.println("Iteration " + i + " solved after: " + count + " ");
	} else {
		System.out.println("Iteration " + i + " not solved after: " + count + " ");
	}
	if (verbose) System.out.println("Gamma " + ((GridNetTD)activator.getWorkingActivator()).getGamma(0, 0) + "\n");
	if (verbose) System.out.println("Alpha " + ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) + "\n");
	
	avg[i/bpoint] += (double)count / bpoint;
	if ((i+1) % bpoint == 0) {
		System.out.println("***** AVG " + avg[i/bpoint] + "\n");
		if (avg[i/bpoint] < best ) {
			best = avg[i/bpoint];
		}
		if (avg[i/bpoint] > worst) {
			worst = avg[i/bpoint];
		}
	}
}
System.out.println("Alpha " + ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) + "\n");
System.out.println("***** BEST " + best + "\n");
System.out.println("***** WORST " + worst + "\n");
for (double a : avg) {
	System.out.println(": " + a);
}
*/



/* ***************************** Right walk test
		// Left-Right walk test
		int inDim = 7;
		int midDim = 6;
		int outDim = 2;
		int[][] layerDimensions = new int[][] { {inDim}, {midDim}, {outDim} };
		RandomGridNet base = RandomGridNet.getRandomGridNet(3, layerDimensions);
		
//		NDFloatArray[] weights = base.getWeights();
//		weights[0].set(0.6f, 0,0);
//		weights[0].set(0.4f, 0,1);
//		weights[0].set(0.7f, 0,2);
//		weights[0].set(0.3f, 1,0);
//		weights[0].set(0.4f, 1,1);
//		weights[0].set(0.6f, 1,2);
//		weights[1].set(0.5f, 0,0);
//		weights[1].set(0.8f, 0,1);
//		weights[1].set(0.7f, 1,0);
//		weights[1].set(0.2f, 1,1);
//		
//		NDFloatArray[] bias = base.getBias();
//		bias[0].set(0.5f, 0);
//		bias[0].set(0.5f, 1);
//		bias[1].set(0.5f, 0);
//		bias[1].set(0.5f, 1);

		
		NDFloatArray[] gamma = new NDFloatArray[1];
		gamma[0] = new NDFloatArray(new int[] {1});
		gamma[0].set(0.5f, 0);
		
		NDFloatArray[] alpha = new NDFloatArray[1];
		alpha[0] = new NDFloatArray(new int[] {1});
		alpha[0].set(0.5f, 0);
		
		NDFloatArray[] lambda = new NDFloatArray[1];
		lambda[0] = new NDFloatArray(new int[] {1});
		lambda[0].set(0.5f, 0);
		
//		NDFloatArray[] decay = new NDFloatArray[1];
//		decay[0] = new NDFloatArray(new int[] {1});
//		decay[0].set(0.00005f, 0);
		
		GridNetTD net = new GridNetTD(
				base
				, gamma
				, lambda
				, alpha
				, null
				, null
				, null
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, false
				, false
				, false
		);
		
		ActivatorNDTemporalDifference activator = new ActivatorNDTemporalDifference(net);
		

boolean solved;
int count, position;
boolean verbose = true;
testFlag = false;
int iterations = 100;
int bpoint = 20;
int asize = iterations / bpoint;
double best = 100;
double worst = 0;
double[] avg = new double[asize];	// 5 tiers
for (int i = 0; i < asize; i++) avg[i] = 0;

for (int i = 0; i < iterations && best > 2 && ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) > 0 ; i++) {
	count = 0; 
//	position = getRandomPosition(activator);
	position = 0;
	solved = false;
	
	if (verbose) System.out.println("Iteration " + i + " starting position=" + position);
	
	while (!solved && count < 4 * inDim) {
		count++;
		
		// prepare input
		NDFloatArray in = new NDFloatArray(new int[]{inDim});
		in.clear();
		in.set(1.0f, new int[] {position});
		
		// activate
		NDFloatArray out = activator.next(in);
		
		// process output
		int lastPosition = position;
		
//		if (Math.random() <= 0.999999999999999)
			position += out.getCoordinatesOfMaxValue()[0] == 0 ? -1 : 1;
		
		// epsilon
//		else {
//			position += (Math.random() > 0.5) ? 1 : -1;
//		}
		
		if (position < 0) position = 0;
		
		if (verbose) System.out.println("Iteration " + i + ", count=" + count + ", moved to position: " + position);
		
		NDFloatArray reward = new NDFloatArray(new int[] {outDim});
		reward.clear();
		
//		// update 
//		if (position == 0) {
////			solved = true;
////			reward.set(1.0f, 0);
////			reward.set(1.0f, 1);
//		} else if (position == inDim -1) {
//			solved = true;
////			reward.set(1.0f, 0);
//			reward.set(1.0f, 1);
//		} else if (position == (int) ((double)inDim / 2.0)) {
////			if (lastPosition > position)
////				reward.set(-1.0f, 0);
////			else
////				reward.set(-1.0f, 1);
//		} else if (position > (inDim /2)+1) {
////			reward.set(0.5f, 0);
////			reward.set(0.5f, 1);
//		} else if (position < (inDim /2)-1) {
////			reward.set(0.5f, 0);
////			reward.set(0.5f, 1);
//		} else {
//			
//		}
		
		if (position == inDim - 1) {
			solved = true;
			reward.set(0.0f, 0);
			reward.set(1.0f, 1);
		} else if (position > lastPosition) {
			reward.set(0.0f, 0);
			reward.set(0.1f, 1);
		} else if (position < lastPosition || position == lastPosition) {
			reward.set(-1.0f, 0);
			reward.set(0.25f, 1);
		}
		
		activator.updateNet(reward);
		
	}
	if (solved) {
		System.out.println("Iteration " + i + " solved after: " + count + " ");
	} else {
		System.out.println("Iteration " + i + " not solved after: " + count + " ");
	}
	if (verbose) System.out.println("Gamma " + ((GridNetTD)activator.getWorkingActivator()).getGamma(0, 0) + "\n");
	if (verbose) System.out.println("Alpha " + ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) + "\n");
	
	avg[i/bpoint] += (double)count / bpoint;
	if ((i+1) % bpoint == 0) {
		System.out.println("***** AVG " + avg[i/bpoint] + "\n");
		if (avg[i/bpoint] < best ) {
			best = avg[i/bpoint];
		}
		if (avg[i/bpoint] > worst) {
			worst = avg[i/bpoint];
		}
	}
}
System.out.println("Alpha " + ((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0) + "\n");
System.out.println("***** BEST " + best + "\n");
System.out.println("***** WORST " + worst + "\n");
for (double a : avg) {
	System.out.println(": " + a);
}
*/