package com.anji.hyperneat.onlinereinforcement;

import java.util.List;

import com.anji.hyperneat.nd.GridNetND;
import com.anji.hyperneat.nd.NDFloatArray;
import com.anji.hyperneat.nd.NDFloatArray.MatrixIterator;
import com.anji.nn.activationfunction.ActivationFunction;
import com.anji.topology.Layer;


/**
 * A {@link GridNetND} extended with temporal-difference (TD) learning
 * parameters: gamma, lambda and alpha values — each stored at SINGLE, LAYER
 * or UNIT granularity — plus optional decay schedules that shrink each
 * parameter toward zero over time.
 *
 * Tightly coupled with ActivatorNDTemporalDifference.
 *
 * @author slusk
 */
public class GridNetTD extends GridNetND implements ActivatorNDLR {

	/*
	 * The TD gamma parameter - controls how to weight current vs future rewards;
	 * lower values = future rewards are valued less.
	 */
	protected NDFloatArray[] gammas;
	protected NDFloatArray[] gammaDecays;

	/*
	 * The TD lambda parameter - discounts past gradients.
	 * lambda = 1 is equivalent to supervised learning with observation-outcome
	 * pairs as its training set; lambda = 0 is equivalent to supervised learning
	 * where the training pairs are states and the predictions made on the
	 * immediately following state.
	 */
	protected NDFloatArray[] lambdas;
	protected NDFloatArray[] lambdaDecays;

	// Weight change learning rate.
	protected NDFloatArray[] alphas;
	protected NDFloatArray[] alphaDecays;

	// Granularity of each parameter: SINGLE (one value for the whole net),
	// LAYER (one value per layer) or UNIT (one value per unit coordinate).
	protected LearningRateGranularity gammaGranularity;
	protected LearningRateGranularity lambdaGranularity;
	protected LearningRateGranularity alphaGranularity;

	protected LearningRateGranularity gammaDecayGranularity;
	protected LearningRateGranularity lambdaDecayGranularity;
	protected LearningRateGranularity alphaDecayGranularity;

	// Whether decayLearningRates() decays each parameter family.
	protected boolean useGammaDecay;
	protected boolean useLambdaDecay;
	protected boolean useAlphaDecay;


	/**
	 * Builds a TD grid network from raw layer/weight/bias arrays plus the TD
	 * parameter arrays. Arrays are stored by reference (not copied).
	 */
	public GridNetTD(NDFloatArray[] layers
			, NDFloatArray[] weights
			, NDFloatArray[] bias
			, NDFloatArray[] gammas
			, NDFloatArray[] lambdas
			, NDFloatArray[] alphas
			, NDFloatArray[] gammaDecays
			, NDFloatArray[] lambdaDecays
			, NDFloatArray[] alphaDecays
			, LearningRateGranularity gammaGranularity
			, LearningRateGranularity lambdaGranularity
			, LearningRateGranularity alphaGranularity
			, LearningRateGranularity gammaDecayGranularity
			, LearningRateGranularity lambdaDecayGranularity
			, LearningRateGranularity alphaDecayGranularity
			, boolean useGammaDecay
			, boolean useLambdaDecay
			, boolean useAlphaDecay
			, ActivationFunction activationFunction
			, int maxDimensions
			, int cyclesPerStep
			, boolean enableBias
			, String name
		) {
		super(
				layers
				, weights
				, bias
				, activationFunction
				, maxDimensions
				, cyclesPerStep
				, enableBias
				, name
		);
		initTDParameters(gammas, lambdas, alphas, gammaDecays, lambdaDecays, alphaDecays,
				gammaGranularity, lambdaGranularity, alphaGranularity,
				gammaDecayGranularity, lambdaDecayGranularity, alphaDecayGranularity,
				useGammaDecay, useLambdaDecay, useAlphaDecay);
	}

	/**
	 * Builds a TD grid network on top of an existing {@link GridNetND},
	 * sharing (not copying) the base network's layers, weights and bias.
	 */
	public GridNetTD(GridNetND base
			, NDFloatArray[] gammas
			, NDFloatArray[] lambdas
			, NDFloatArray[] alphas
			, NDFloatArray[] gammaDecays
			, NDFloatArray[] lambdaDecays
			, NDFloatArray[] alphaDecays
			, LearningRateGranularity gammaGranularity
			, LearningRateGranularity lambdaGranularity
			, LearningRateGranularity alphaGranularity
			, LearningRateGranularity gammaDecayGranularity
			, LearningRateGranularity lambdaDecayGranularity
			, LearningRateGranularity alphaDecayGranularity
			, boolean useGammaDecay
			, boolean useLambdaDecay
			, boolean useAlphaDecay
	) {
		super(base.getLayers(), base.getWeights(), base.getBias(), base.getActivationFunction(), base.getMaxDimensions(), base.getCyclesPerStep(), base.getEnableBias(), base.getName());
		initTDParameters(gammas, lambdas, alphas, gammaDecays, lambdaDecays, alphaDecays,
				gammaGranularity, lambdaGranularity, alphaGranularity,
				gammaDecayGranularity, lambdaDecayGranularity, alphaDecayGranularity,
				useGammaDecay, useLambdaDecay, useAlphaDecay);
	}

	/**
	 * Builds a TD grid network from a topology-layer list plus the TD
	 * parameter arrays. Arrays are stored by reference (not copied).
	 */
	public GridNetTD(List<Layer> allLayersList
			, NDFloatArray[] weights
			, NDFloatArray[] bias
			, NDFloatArray[] gammas
			, NDFloatArray[] lambdas
			, NDFloatArray[] alphas
			, NDFloatArray[] gammaDecays
			, NDFloatArray[] lambdaDecays
			, NDFloatArray[] alphaDecays
			, LearningRateGranularity gammaGranularity
			, LearningRateGranularity lambdaGranularity
			, LearningRateGranularity alphaGranularity
			, LearningRateGranularity gammaDecayGranularity
			, LearningRateGranularity lambdaDecayGranularity
			, LearningRateGranularity alphaDecayGranularity
			, boolean useGammaDecay
			, boolean useLambdaDecay
			, boolean useAlphaDecay
			, ActivationFunction activationFunction
			, int maxDimensions
			, int cyclesPerStep
			, boolean enableBias
			, String name) {
		super(allLayersList, weights, bias, activationFunction, maxDimensions, cyclesPerStep, enableBias, name);
		initTDParameters(gammas, lambdas, alphas, gammaDecays, lambdaDecays, alphaDecays,
				gammaGranularity, lambdaGranularity, alphaGranularity,
				gammaDecayGranularity, lambdaDecayGranularity, alphaDecayGranularity,
				useGammaDecay, useLambdaDecay, useAlphaDecay);
	}

	/** Shared field initialization for all constructors. */
	private void initTDParameters(NDFloatArray[] gammas
			, NDFloatArray[] lambdas
			, NDFloatArray[] alphas
			, NDFloatArray[] gammaDecays
			, NDFloatArray[] lambdaDecays
			, NDFloatArray[] alphaDecays
			, LearningRateGranularity gammaGranularity
			, LearningRateGranularity lambdaGranularity
			, LearningRateGranularity alphaGranularity
			, LearningRateGranularity gammaDecayGranularity
			, LearningRateGranularity lambdaDecayGranularity
			, LearningRateGranularity alphaDecayGranularity
			, boolean useGammaDecay
			, boolean useLambdaDecay
			, boolean useAlphaDecay) {
		this.gammas = gammas;
		this.lambdas = lambdas;
		this.alphas = alphas;
		this.gammaDecays = gammaDecays;
		this.lambdaDecays = lambdaDecays;
		this.alphaDecays = alphaDecays;
		this.gammaGranularity = gammaGranularity;
		this.lambdaGranularity = lambdaGranularity;
		this.alphaGranularity = alphaGranularity;
		this.gammaDecayGranularity = gammaDecayGranularity;
		this.lambdaDecayGranularity = lambdaDecayGranularity;
		this.alphaDecayGranularity = alphaDecayGranularity;
		this.useGammaDecay = useGammaDecay;
		this.useLambdaDecay = useLambdaDecay;
		this.useAlphaDecay = useAlphaDecay;
	}

	/**
	 * Resolves a parameter value according to its granularity: element 0 of
	 * array 0 (SINGLE), element 0 of the layer's array (LAYER), or the value
	 * at {@code coords} in the layer's array (UNIT). Returns 0 if the
	 * granularity matches none of these (mirrors the original default).
	 */
	private static float resolve(NDFloatArray[] values, LearningRateGranularity granularity, int layer, int... coords) {
		switch (granularity) {
		case SINGLE:
			return values[0].get(0);
		case LAYER:
			return values[layer].get(0);
		case UNIT:
			return values[layer].get(coords);
		default:
			return 0.0f;
		}
	}

	/** The weight learning rate is the TD alpha parameter. */
	@Override
	public float getWeightLearningRate(int layer, int... coords) {
		return getAlpha(layer, coords);
	}

	/**
	 * Unimplemented stub: always returns 0 (i.e. bias values never learn).
	 * NOTE(review): other unimplemented members throw; this one silently
	 * returns 0 — kept as-is since callers may rely on it.
	 */
	@Override
	public float getBiasLearningRate(int layer, int... coords) {
		// TODO Auto-generated method stub
		return 0;
	}

	/** TD gamma (future-reward discount) for the given layer/unit. */
	public float getGamma(int layer, int... coords) {
		return resolve(gammas, gammaGranularity, layer, coords);
	}

	/** TD gamma for the output layer (the last entry of {@code gammas}). */
	public float getOutputGamma(int... coords) {
		switch (gammaGranularity) {
		case SINGLE:
			return gammas[0].get(0);
		case LAYER:
			return gammas[gammas.length - 1].get(0);
		case UNIT:
			// treat like bias for output gammas
			// NOTE(review): coords is deliberately passed twice here, unlike the
			// other UNIT lookups — confirm this matches NDFloatArray.get's
			// bias-style addressing.
			return gammas[gammas.length - 1].get(coords, coords);
		default:
			return 0.0f;
		}
	}

	/** TD lambda (eligibility-trace discount) for the given layer/unit. */
	public float getLambda(int layer, int... coords) {
		return resolve(lambdas, lambdaGranularity, layer, coords);
	}

	/** TD alpha (weight-change learning rate) for the given layer/unit. */
	public float getAlpha(int layer, int... coords) {
		return resolve(alphas, alphaGranularity, layer, coords);
	}

	/** Per-step decay amount applied to gamma for the given layer/unit. */
	public float getGammaDecay(int layer, int... coords) {
		return resolve(gammaDecays, gammaDecayGranularity, layer, coords);
	}

	/** Per-step decay amount applied to lambda for the given layer/unit. */
	public float getLambdaDecay(int layer, int... coords) {
		return resolve(lambdaDecays, lambdaDecayGranularity, layer, coords);
	}

	/** Per-step decay amount applied to alpha for the given layer/unit. */
	public float getAlphaDecay(int layer, int... coords) {
		return resolve(alphaDecays, alphaDecayGranularity, layer, coords);
	}

	@Override
	public LearningRateGranularity getLearningRateGranularity() {return alphaGranularity;}
	public LearningRateGranularity getGammaGranularity() {return gammaGranularity;}
	public LearningRateGranularity getLambdaGranularity() {return lambdaGranularity;}
	public LearningRateGranularity getAlphaGranularity() {return alphaGranularity;}
	public LearningRateGranularity getGammaDecayGranularity() {return gammaDecayGranularity;}
	public LearningRateGranularity getLambdaDecayGranularity() {return lambdaDecayGranularity;}
	public LearningRateGranularity getAlphaDecayGranularity() {return alphaDecayGranularity;}

	public boolean getUseGammaDecay() {return useGammaDecay;}
	public boolean getUseLambdaDecay() {return useLambdaDecay;}
	public boolean getUseAlphaDecay() {return useAlphaDecay;}

	/**
	 * Applies one decay step to every gamma, lambda and alpha value whose
	 * decay flag is enabled, moving each value toward zero.
	 */
	@Override
	public void decayLearningRates() {
		if (useGammaDecay) {
			for (int layer = 0; layer < gammas.length; layer++) {
				for (MatrixIterator it = gammas[layer].iterator(); it.hasNext(); it.next()) {
					decayGammas(layer, it);
				}
			}
		}
		if (useLambdaDecay) {
			for (int layer = 0; layer < lambdas.length; layer++) {
				for (MatrixIterator it = lambdas[layer].iterator(); it.hasNext(); it.next()) {
					decayLambdas(layer, it);
				}
			}
		}
		if (useAlphaDecay) {
			for (int layer = 0; layer < alphas.length; layer++) {
				for (MatrixIterator it = alphas[layer].iterator(); it.hasNext(); it.next()) {
					decayAlphas(layer, it);
				}
			}
		}
	}

	/**
	 * Moves {@code current} one {@code decay} step toward zero, clamping at
	 * zero so the value never crosses to the opposite sign.
	 *
	 * Fixes the original negative-case comparison ({@code current > decay}),
	 * which is never true for a negative value and a non-negative decay, so
	 * negative values could overshoot past zero instead of stopping there.
	 * Assumes {@code decay >= 0} — TODO confirm decays are non-negative.
	 */
	private static float decayTowardZero(float current, float decay) {
		if (current > 0) {	// Positive case, decay negatively
			return (current < decay) ? 0.0f : current - decay;
		}
		// Negative case, decay positively
		return (current > -decay) ? 0.0f : current + decay;
	}

	/** Decays one gamma value in place (no-op when it is already zero). */
	protected void decayGammas(int layer, MatrixIterator gamma) {
		float current = gamma.get();
		if (current == 0) return; // Don't decay, just leave it
		gamma.set(decayTowardZero(current, getGammaDecay(layer, gamma.getCurrentCoordinates())));
	}

	/** Decays one lambda value in place (no-op when it is already zero). */
	protected void decayLambdas(int layer, MatrixIterator lambda) {
		float current = lambda.get();
		if (current == 0) return; // Don't decay, just leave it
		lambda.set(decayTowardZero(current, getLambdaDecay(layer, lambda.getCurrentCoordinates())));
	}

	/** Decays one alpha value in place (no-op when it is already zero). */
	protected void decayAlphas(int layer, MatrixIterator alphas) {
		float current = alphas.get();
		if (current == 0) return; // Don't decay, just leave it
		alphas.set(decayTowardZero(current, getAlphaDecay(layer, alphas.getCurrentCoordinates())));
	}

	public NDFloatArray[] getGammas() {return gammas;}
	public NDFloatArray[] getLambdas() {return lambdas;}
	public NDFloatArray[] getAlphas() {return alphas;}
	public NDFloatArray[] getGammaDecays() {return gammaDecays;}
	public NDFloatArray[] getLambdaDecays() {return lambdaDecays;}
	public NDFloatArray[] getAlphaDecays() {return alphaDecays;}


	@Override
	public NDFloatArray[] getWeightLearningRates() {
		throw new UnsupportedOperationException("Not Implemented for GridNetTD.");
	}

	@Override
	public NDFloatArray[] getBiasLearningRates() {
		throw new UnsupportedOperationException("Not Implemented for GridNetTD.");
	}

	@Override
	public NDFloatArray[] getWeightLearningRateDecays() {
		throw new UnsupportedOperationException("Not Implemented for GridNetTD.");
	}

	@Override
	public NDFloatArray[] getBiasLearningRateDecays() {
		throw new UnsupportedOperationException("Not Implemented for GridNetTD.");
	}

	@Override
	public float getWeightLearningRateDecay(int layer, int... coords) {
		throw new UnsupportedOperationException("Not Implemented for GridNetTD.");
	}

	@Override
	public float getBiasLearningRateDecay(int layer, int... coords) {
		throw new UnsupportedOperationException("Not Implemented for GridNetTD.");
	}

	/** Element-wise deep copy of an NDFloatArray array; null passes through. */
	private static NDFloatArray[] deepCopy(NDFloatArray[] source) {
		if (source == null) return null;
		NDFloatArray[] copy = new NDFloatArray[source.length];
		for (int i = 0; i < source.length; i++) copy[i] = source[i].clone();
		return copy;
	}

	/**
	 * Deep copy: clones every layer, weight, bias and TD-parameter array;
	 * granularity enums and decay flags are shared (immutable).
	 */
	@Override
	public GridNetTD clone() {
		// Bias arrays only exist when bias is enabled.
		NDFloatArray[] biasCopy = getEnableBias() ? deepCopy(this.getBias()) : null;

		return new GridNetTD(
				deepCopy(this.getLayers())
				, deepCopy(this.getWeights())
				, biasCopy
				, deepCopy(gammas)
				, deepCopy(lambdas)
				, deepCopy(alphas)
				, deepCopy(gammaDecays)
				, deepCopy(lambdaDecays)
				, deepCopy(alphaDecays)
				, gammaGranularity
				, lambdaGranularity
				, alphaGranularity
				, gammaDecayGranularity
				, lambdaDecayGranularity
				, alphaDecayGranularity
				, useGammaDecay
				, useLambdaDecay
				, useAlphaDecay
				, this.getActivationFunction()
				, this.getMaxDimensions()
				, this.getCyclesPerStep()
				, this.getEnableBias()
				, this.getName()
		);
	}

	/**
	 * @param args unused
	 */
	public static void main(String[] args) {
		// TODO Auto-generated method stub

	}

}
