package com.anji.hyperneat.onlinereinforcement;

import com.anji.hyperneat.nd.NDFloatArray;
import com.anji.hyperneat.nd.NDFloatArray.MatrixIterator;
import com.anji.hyperneat.onlinereinforcement.ActivatorNDLR.LearningRateGranularity;

/**
 * Manual test harness for the TD(0) online-reinforcement activator
 * ({@link ActivatorNdTdZero} wrapping a {@link GridNetTD}).
 *
 * <p>Both tests present a deterministic cyclic sequence of states and reward
 * the network for predicting the successor state, printing per-iteration
 * progress to stdout. Run with a single argument selecting the test:
 * {@code testOne} or {@code testTwo}.
 */
public class TDTest2 {

	/**
	 * Dispatches to one of the two TD smoke tests by name.
	 *
	 * @param args {@code args[0]} selects the test: {@code "testOne"} or
	 *             {@code "testTwo"}; exits silently when no argument is given
	 */
	public static void main(String[] args) {
		if (0 == args.length) System.exit(0);
		
		if ("testOne".equals(args[0])) testOne();
		if ("testTwo".equals(args[0])) testTwo();
	}
	
	/**
	 * Test 1: learn the cyclic position sequence 0 -&gt; 1 -&gt; 2 -&gt; 0 ...
	 *
	 * <p>The input is a one-hot 3-vector marking the current position; the
	 * index of the network's maximum output is its prediction of the next
	 * position. A reward of +0.5 is delivered at the predicted index when it
	 * matches the true successor, -0.5 otherwise. An epoch is "solved" after
	 * three consecutive correct predictions; the outer loop stops once an
	 * epoch solves quickly (best count &lt;= 3) or the learning rate decays
	 * to zero.
	 */
	public static void testOne() {
		int inDim = 3;
		int midDim = 3;
		int outDim = 3;
		int[][] layerDimensions = new int[][] { {inDim}, {midDim}, {outDim} };
		
		ActivatorNdTdZero activator = buildActivator(layerDimensions);
		
		NDFloatArray inputs = new NDFloatArray(new int[] {inDim});
		NDFloatArray outputs, reward;
		
		boolean solved = false;
		int position = 0, count = 0, iterations = 50, best = Integer.MAX_VALUE, correctCount = 0;
		int outPos, lastPos;
		
		// best > 3.25 effectively means best > 3 (int vs. double comparison);
		// also stop once alpha has decayed to zero — no further learning is possible.
		for (int i = 0; i < iterations && best > 3.25 && ((GridNetTD) activator.getWorkingActivator()).getAlpha(0, 0) > 0; i++) {
			solved = false;
			correctCount = 0;
			count = 0;
			while (!solved && count < 5000) {
				
				// one-hot encode the current position
				inputs.clear();
				inputs.set(1.0f, position);
				lastPos = position;
				
				// activate: the max-output index is the predicted next position
				outputs = activator.next(inputs);
				outPos = outputs.getCoordinatesOfMaxValue()[0];
				
				// score the network's prediction against the true successor.
				// BUGFIX: the original compared 'position' (still equal to lastPos)
				// to 'expected' ((lastPos + 1) % 3), which can never be true, so the
				// network's output was never evaluated and the test could not solve.
				reward = new NDFloatArray(new int[] {outDim});
				reward.clear();
				int expected = (lastPos + 1) % 3;
				if (outPos == expected) {
					reward.set(0.5f, outPos);
					correctCount++;
				} else {
					reward.set(-.5f, outPos);
					correctCount = 0;
				}
				activator.updateNet(reward);
				
				// advance along the true sequence regardless of the prediction
				position = expected;
				
				if (correctCount == 3) solved = true;
				count++;
			}
			System.out.println("Iteration " + i + ": solved? " + solved + "; count: " + count + ".");
			System.out.println("Alpha: " +((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0));
			if (count < best) best = count;
		}
	}

	/**
	 * Test 2: learn a cyclic sequence of 3-bit patterns:
	 * <pre>
	 * state  -&gt; expected next
	 * 000    -&gt; 111
	 * 111    -&gt; 101
	 * 101    -&gt; 010
	 * 010    -&gt; 000
	 * </pre>
	 * Each epoch steps through all four transitions, thresholding the network
	 * output at 0.5 per element (see {@link #translate}) and counting exact
	 * pattern matches. A single graded reward is applied once per epoch:
	 * 0 for a perfect epoch (4/4), -0.5 for 2-3 correct, -1.0 for 0-1 correct.
	 */
	public static void testTwo() {
		int inDim = 3;
		int midDim = 3;
		int outDim = 3;
		int[][] layerDimensions = new int[][] { {inDim}, {midDim}, {outDim} };

		ActivatorNdTdZero activator = buildActivator(layerDimensions);
		
		NDFloatArray inputs = new NDFloatArray(new int[] {inDim});
		NDFloatArray outputs, reward, expected, translated;
		// starting state is the zero pattern (000)
		NDFloatArray state = new NDFloatArray(new int[] {inDim});
		
		boolean solved = false;
		int iterations = 100000, correctCount = 0;
		
		// stop on success or once alpha has decayed to zero
		for (int i = 0; i < iterations && !solved && ((GridNetTD) activator.getWorkingActivator()).getAlpha(0, 0) > 0; i++) {
			
			correctCount = 0;
			for (int count = 0; count < 4; count++) {
				
				// present the current pattern
				inputs = state;
				
				// activate
				outputs = activator.next(inputs);
				
				// threshold the output and compare with the true successor
				expected = setExpected(state);
				translated = translate(outputs);
				
				if (match(expected, translated)) {
					correctCount++;
				}
				
				// follow the true sequence regardless of the prediction
				state = expected;
			}
			
			reward = new NDFloatArray(new int[] {outDim});
			reward.clear();
			
			if (correctCount == 4) {
				solved = true;
				// neutral reward for a perfect epoch (explicit zeros for clarity)
				reward.set(0.0f, 0);
				reward.set(0.0f, 1);
				reward.set(0.0f, 2);
				activator.updateNet(reward);
			} else if (correctCount == 3 || correctCount == 2) {
				reward.set(-0.5f, 0);
				reward.set(-0.5f, 1);
				reward.set(-0.5f, 2);
				activator.updateNet(reward);
			} else if (correctCount == 0 || correctCount == 1) {
				reward.set(-1.0f, 0);
				reward.set(-1.0f, 1);
				reward.set(-1.0f, 2);
				activator.updateNet(reward);
			}
			
			System.out.println("Iteration " + i + ": solved? " + solved + "; correct: " + correctCount);
			System.out.println("Alpha: " +((GridNetTD)activator.getWorkingActivator()).getAlpha(0, 0));
		}
	}

	/**
	 * Element-wise equality of two arrays of the same shape.
	 *
	 * @return true iff every element of {@code expected} equals the element at
	 *         the same raw coordinate in {@code translated}
	 */
	private static boolean match(NDFloatArray expected, NDFloatArray translated) {
		for (MatrixIterator ex = expected.iterator(); ex.hasNext(); ex.next()) {
			if (ex.get() != translated.getFromRawCoordinate(ex.getRawCoordinate()))
				return false;
		}
		return true;
	}
	
	/**
	 * Element-wise error signal {@code expected - outs}.
	 * NOTE(review): currently unused — kept for experimentation with a graded
	 * per-element penalty in {@link #testTwo}.
	 */
	private static NDFloatArray penalty(NDFloatArray expected, NDFloatArray outs) {
		NDFloatArray  pen = new NDFloatArray(outs.getDimensions());
		for (MatrixIterator out = outs.iterator(); out.hasNext(); out.next()) {
			pen.set(expected.get(out.getCurrentCoordinates())-out.get(), out.getCurrentCoordinates());
		}
		return pen;
	}

	/**
	 * Successor function for the test-two sequence (000 -&gt; 111 -&gt; 101 -&gt;
	 * 010 -&gt; 000). A state outside the cycle yields the default (cleared)
	 * array, i.e. the zero pattern, re-entering the cycle at 000.
	 *
	 * @param state current 3-element binary pattern
	 * @return the expected next pattern
	 */
	private static NDFloatArray setExpected(NDFloatArray state) {
		NDFloatArray expected = new NDFloatArray(state.getDimensions());
		
		if (state.get(0) == 0 && state.get(1) == 0 && state.get(2) == 0) {
			expected.set(1.0f, 0);
			expected.set(1.0f, 1);
			expected.set(1.0f, 2);
		}
		
		if (state.get(0) == 1 && state.get(1) == 1 && state.get(2) == 1) {
			expected.set(1.0f, 0);
			expected.set(0.0f, 1);
			expected.set(1.0f, 2);
		}
		
		if (state.get(0) == 1 && state.get(1) == 0 && state.get(2) == 1) {
			expected.set(0.0f, 0);
			expected.set(1.0f, 1);
			expected.set(0.0f, 2);
		}
		
		if (state.get(0) == 0 && state.get(1) == 1 && state.get(2) == 0) {
			expected.set(0.0f, 0);
			expected.set(0.0f, 1);
			expected.set(0.0f, 2);
		}
		return expected;
	}
	
	/**
	 * Binarizes a network output: each element becomes 0 if below 0.5,
	 * otherwise 1.
	 */
	private static NDFloatArray translate(NDFloatArray outs) {
		NDFloatArray tx = new NDFloatArray(outs.getDimensions());
		for (MatrixIterator it = outs.iterator(); it.hasNext(); it.next()) {
			if (it.get() < 0.5) tx.set(0, it.getCurrentCoordinates());
			else  tx.set(1.0f, it.getCurrentCoordinates());
		}
		return tx;
	}

	/**
	 * Builds a TD(0) activator over a random grid net with single (scalar)
	 * learning-rate granularity: gamma = lambda = alpha = 0.5 and a small
	 * alpha decay so learning eventually halts.
	 *
	 * @param layerDimensions per-layer dimensions of the underlying grid net
	 * @return a freshly initialized activator
	 */
	protected static ActivatorNdTdZero buildActivator(int[][] layerDimensions) {
		RandomGridNet base = RandomGridNet.getRandomGridNet(3, layerDimensions);
	
		NDFloatArray[] gamma = new NDFloatArray[1];
		gamma[0] = new NDFloatArray(new int[] {1});
		gamma[0].set(0.5f, 0);
	
		NDFloatArray[] alpha = new NDFloatArray[1];
		alpha[0] = new NDFloatArray(new int[] {1});
		alpha[0].set(0.5f, 0);
	
		NDFloatArray[] lambda = new NDFloatArray[1];
		lambda[0] = new NDFloatArray(new int[] {1});
		lambda[0].set(0.5f, 0);
	
		NDFloatArray[] decay = new NDFloatArray[1];
		decay[0] = new NDFloatArray(new int[] {1});
		decay[0].set(0.0000004f, 0);
	
		GridNetTD net = new GridNetTD(
				base
				, gamma
				, lambda
				, alpha
				, null
				, null
				, decay
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, LearningRateGranularity.SINGLE
				, false
				, false
				, true
		);
	
		ActivatorNdTdZero activator = new ActivatorNdTdZero(net);
		return activator;
	}
}
