package com.anji.hyperneat.onlinereinforcement;

import java.util.List;

import com.anji.hyperneat.nd.GridNetND;
import com.anji.hyperneat.nd.NDFloatArray;
import com.anji.hyperneat.nd.NDFloatArray.MatrixIterator;
import com.anji.hyperneat.onlinereinforcement.ActivatorNDLR.LearningRateGranularity;
import com.anji.nn.activationfunction.ActivationFunction;
import com.anji.topology.Layer;

/**
 * A {@link GridNetND} extended with per-connection Hebbian ABC plasticity parameters:
 * four learning rates (n, A, B, C) per weight, each with an optional per-step decay
 * toward zero.
 * <p>
 * The rates are stored at the granularity given by {@code lrGranularity}: one global
 * value ({@code SINGLE}), one value per layer ({@code LAYER}), or one value per unit
 * ({@code UNIT}).
 */
public class GridNetHebbianABC extends GridNetND implements ActivatorNDLR {
	
	/** If false, {@link #decayLearningRates()} is a no-op. */
	protected boolean useDecay;
	/** Granularity at which the n/A/B/C learning rates are stored and looked up. */
	protected LearningRateGranularity lrGranularity;
	/** Granularity of the decay values. NOTE(review): stored but never read in this class — confirm intended use. */
	protected LearningRateGranularity lrDecayGranularity;
	/** "n" Hebbian learning-rate parameters, one NDFloatArray per layer. */
	protected NDFloatArray[] nLearningRates;
	/** Per-step decay amounts for the n parameters; may be null when decay is unused. */
	protected NDFloatArray[] nLearningRateDecays;
	/** "A" Hebbian learning-rate parameters, one NDFloatArray per layer. */
	protected NDFloatArray[] ALearningRates;
	/** Per-step decay amounts for the A parameters; may be null when decay is unused. */
	protected NDFloatArray[] ALearningRateDecays;
	/** "B" Hebbian learning-rate parameters, one NDFloatArray per layer. */
	protected NDFloatArray[] BLearningRates;
	/** Per-step decay amounts for the B parameters; may be null when decay is unused. */
	protected NDFloatArray[] BLearningRateDecays;
	/** "C" Hebbian learning-rate parameters, one NDFloatArray per layer. */
	protected NDFloatArray[] CLearningRates;
	/** Per-step decay amounts for the C parameters; may be null when decay is unused. */
	protected NDFloatArray[] CLearningRateDecays;
	
	/**
	 * Constructs the network from a list of {@link Layer} descriptors.
	 * All array arguments are stored by reference (no defensive copy).
	 */
	public GridNetHebbianABC(List<Layer> allLayers
			, NDFloatArray[] weights
			, NDFloatArray[] bias
			, NDFloatArray[] nLearningRates
			, NDFloatArray[] nLearningRateDecays
			, NDFloatArray[] ALearningRates
			, NDFloatArray[] ALearningRateDecays
			, NDFloatArray[] BLearningRates
			, NDFloatArray[] BLearningRateDecays
			, NDFloatArray[] CLearningRates
			, NDFloatArray[] CLearningRateDecays
			, ActivationFunction activationFunction
			, int maxDimensions
			, int cyclesPerStep
			, boolean enableBias
			, String name
			, LearningRateGranularity lrGranularity
			, LearningRateGranularity lrDecayGranularity
			, boolean useDecay
	) {
		super(allLayers, weights, bias, activationFunction, maxDimensions, cyclesPerStep, enableBias, name);
		initLearningParameters(nLearningRates, nLearningRateDecays
				, ALearningRates, ALearningRateDecays
				, BLearningRates, BLearningRateDecays
				, CLearningRates, CLearningRateDecays
				, lrGranularity, lrDecayGranularity, useDecay);
	}
	
	/**
	 * Constructs the network from pre-built layer activation arrays.
	 * All array arguments are stored by reference (no defensive copy).
	 */
	public GridNetHebbianABC(NDFloatArray[] layers
			, NDFloatArray[] weights
			, NDFloatArray[] bias
			, NDFloatArray[] nLearningRates
			, NDFloatArray[] nLearningRateDecays
			, NDFloatArray[] ALearningRates
			, NDFloatArray[] ALearningRateDecays
			, NDFloatArray[] BLearningRates
			, NDFloatArray[] BLearningRateDecays
			, NDFloatArray[] CLearningRates
			, NDFloatArray[] CLearningRateDecays
			, ActivationFunction activationFunction
			, int maxDimensions
			, int cyclesPerStep
			, boolean enableBias
			, String name
			, LearningRateGranularity lrGranularity
			, LearningRateGranularity lrDecayGranularity
			, boolean useDecay
	) {
		super(layers, weights, bias, activationFunction, maxDimensions, cyclesPerStep, enableBias, name);
		initLearningParameters(nLearningRates, nLearningRateDecays
				, ALearningRates, ALearningRateDecays
				, BLearningRates, BLearningRateDecays
				, CLearningRates, CLearningRateDecays
				, lrGranularity, lrDecayGranularity, useDecay);
	}

	/**
	 * Constructor for converting a standard GridNetND into a GridNetNDLR.
	 * This copies the references from the base GridNetND; you may wish to clone it first,
	 * if you don't want the original GridNetND mucked with.
	 * <p>
	 * NOTE(review): the {@code weightLearningRates}, {@code biasLearningRates},
	 * {@code weightLearningRateDecays} and {@code biasLearningRateDecays} parameters are
	 * accepted but never stored — confirm whether that is intentional.
	 * @param base network whose layers/weights/bias/config are reused by reference
	 * @param weightLearningRates ignored (see note above)
	 * @param biasLearningRates ignored (see note above)
	 * @param weightLearningRateDecays ignored (see note above)
	 * @param biasLearningRateDecays ignored (see note above)
	 * @param lrGranularity granularity at which the n/A/B/C rates are looked up
	 * @param lrDecayGranularity granularity for decay values
	 * @param useDecay whether {@link #decayLearningRates()} is enabled
	 */
	public GridNetHebbianABC(GridNetND base
			, NDFloatArray[] weightLearningRates
			, NDFloatArray[] biasLearningRates
			, NDFloatArray[] weightLearningRateDecays
			, NDFloatArray[] biasLearningRateDecays
			, NDFloatArray[] nLearningRates
			, NDFloatArray[] nLearningRateDecays
			, NDFloatArray[] ALearningRates
			, NDFloatArray[] ALearningRateDecays
			, NDFloatArray[] BLearningRates
			, NDFloatArray[] BLearningRateDecays
			, NDFloatArray[] CLearningRates
			, NDFloatArray[] CLearningRateDecays
			, LearningRateGranularity lrGranularity
			, LearningRateGranularity lrDecayGranularity
			, boolean useDecay
	) {
		super(base.getLayers(), base.getWeights(), base.getBias(), base.getActivationFunction(), base.getMaxDimensions(), base.getCyclesPerStep(), base.getEnableBias(), base.getName());
		initLearningParameters(nLearningRates, nLearningRateDecays
				, ALearningRates, ALearningRateDecays
				, BLearningRates, BLearningRateDecays
				, CLearningRates, CLearningRateDecays
				, lrGranularity, lrDecayGranularity, useDecay);
	}

	/** Shared field assignment for all constructors (references are stored, not copied). */
	private void initLearningParameters(NDFloatArray[] nLearningRates
			, NDFloatArray[] nLearningRateDecays
			, NDFloatArray[] ALearningRates
			, NDFloatArray[] ALearningRateDecays
			, NDFloatArray[] BLearningRates
			, NDFloatArray[] BLearningRateDecays
			, NDFloatArray[] CLearningRates
			, NDFloatArray[] CLearningRateDecays
			, LearningRateGranularity lrGranularity
			, LearningRateGranularity lrDecayGranularity
			, boolean useDecay
	) {
		this.nLearningRates = nLearningRates;
		this.nLearningRateDecays = nLearningRateDecays;
		this.ALearningRates = ALearningRates;
		this.ALearningRateDecays = ALearningRateDecays;
		this.BLearningRates = BLearningRates;
		this.BLearningRateDecays = BLearningRateDecays;
		this.CLearningRates = CLearningRates;
		this.CLearningRateDecays = CLearningRateDecays;
		this.lrGranularity = lrGranularity;
		this.lrDecayGranularity = lrDecayGranularity;
		this.useDecay = useDecay;
	}

	/** The "weight" learning rate of this network is the Hebbian n parameter. */
	public float getWeightLearningRate(int layer, int... coords) {
		return getNLearningRate(layer, coords);
	}
	
	/** Returns the n learning rate for the given layer/unit, honoring {@link #lrGranularity}. */
	public float getNLearningRate(int layer, int... coords) {
		return lookupRate(nLearningRates, layer, coords);
	}
	
	/** Returns the A learning rate for the given layer/unit, honoring {@link #lrGranularity}. */
	public float getALearningRate(int layer, int... coords) {
		return lookupRate(ALearningRates, layer, coords);
	}
	
	/** Returns the B learning rate for the given layer/unit, honoring {@link #lrGranularity}. */
	public float getBLearningRate(int layer, int... coords) {
		return lookupRate(BLearningRates, layer, coords);
	}
	
	/** Returns the C learning rate for the given layer/unit, honoring {@link #lrGranularity}. */
	public float getCLearningRate(int layer, int... coords) {
		return lookupRate(CLearningRates, layer, coords);
	}

	/**
	 * Resolves a rate from {@code rates} according to {@link #lrGranularity}:
	 * SINGLE reads {@code rates[0]} at index 0, LAYER reads {@code rates[layer]} at
	 * index 0, UNIT reads {@code rates[layer]} at {@code coords}.
	 * Returns 0 for any unrecognized granularity (matches the original fall-through).
	 */
	private float lookupRate(NDFloatArray[] rates, int layer, int... coords) {
		switch (lrGranularity) {
		case SINGLE:
			return rates[0].get(0);
		case LAYER:
			return rates[layer].get(0);
		case UNIT:
			return rates[layer].get(coords);
		default:
			return 0.0f;
		}
	}

	@Override
	public float getBiasLearningRate(int layer, int... coords) {
		// Bias is not subject to Hebbian updates in this network; always 0.
		return 0;
	}

	@Override
	public LearningRateGranularity getLearningRateGranularity() {
		return lrGranularity;
	}

	/**
	 * Decays every n/A/B/C learning rate one step toward zero, at the configured
	 * granularity. No-op when {@link #useDecay} is false.
	 */
	@Override
	public void decayLearningRates() {
		if (!useDecay) return;
		
		switch(lrGranularity) {
		case SINGLE:
			decayLearningRates(0, new int[] {0});
			break;
		case LAYER:
			for (int layer = 0; layer < getNumLayers() - 1; layer++) {
				decayLearningRates(layer, new int[] {0});
			}
			break;
		case UNIT:
			for (int layer = 0; layer < getNumLayers() - 1; layer++) {
				// Iterate the weight matrix to visit every unit coordinate.
				for (MatrixIterator it = getWeights()[layer].iterator(); it.hasNext(); it.next()) {
					decayLearningRates(layer, it.getCurrentCoordinates());
				}
			}
			break;
		}		
	}

	/**
	 * Decays the n, A, B and C parameters at one location.
	 * Fixed: the original returned from the whole method whenever a parameter was
	 * already 0, so a zero n rate skipped decaying A/B/C (and a zero A skipped B/C).
	 * Each parameter now decays independently.
	 */
	protected void decayLearningRates(int layer, int... coords) {
		decayParameter(nLearningRates, nLearningRateDecays, layer, coords);
		decayParameter(ALearningRates, ALearningRateDecays, layer, coords);
		decayParameter(BLearningRates, BLearningRateDecays, layer, coords);
		decayParameter(CLearningRates, CLearningRateDecays, layer, coords);
	}

	/**
	 * Moves one parameter toward zero by its decay amount, clamping at zero so the
	 * rate never flips sign. A rate that is already exactly 0 is left untouched.
	 * <p>
	 * NOTE(review): the original negative-rate clamp compared {@code current > decay},
	 * which (for a positive decay amount) never triggers and lets a negative rate
	 * overshoot past zero; the symmetric clamp {@code current > -decay} is used here.
	 * Confirm decay amounts are stored as positive magnitudes.
	 */
	private static void decayParameter(NDFloatArray[] rates, NDFloatArray[] decays, int layer, int... coords) {
		float current = rates[layer].get(coords);
		if (current == 0) return; // fully decayed; leave at zero
		
		float decay = decays[layer].get(coords);
		if (current > 0) {
			// Positive case: step down, clamping at zero.
			rates[layer].set(current < decay ? 0.0f : current - decay, coords);
		} else {
			// Negative case: step up toward zero, clamping at zero.
			rates[layer].set(current > -decay ? 0.0f : current + decay, coords);
		}
	}
	
	/** The "weight" learning rates of this network are the Hebbian n parameters. */
	@Override
	public NDFloatArray[] getWeightLearningRates() {
		return getNLearningRates();
	}

	public NDFloatArray[] getNLearningRates() {
		return nLearningRates;
	}

	public NDFloatArray[] getALearningRates() {
		return ALearningRates;
	}
	
	public NDFloatArray[] getBLearningRates() {
		return BLearningRates;
	}
	
	public NDFloatArray[] getCLearningRates() {
		return CLearningRates;
	}
	
	/** Bias has no learning rates in this network. */
	@Override
	public NDFloatArray[] getBiasLearningRates() {
		return null;
	}

	@Override
	public NDFloatArray[] getWeightLearningRateDecays() {
		return getNLearningRateDecays();
	}
	
	public NDFloatArray[] getNLearningRateDecays() {
		return nLearningRateDecays;
	}
	
	public NDFloatArray[] getALearningRateDecays() {
		return ALearningRateDecays;
	}
	
	public NDFloatArray[] getBLearningRateDecays() {
		return BLearningRateDecays;
	}

	public NDFloatArray[] getCLearningRateDecays() {
		return CLearningRateDecays;
	}

	/** NOTE(review): unlike the rate getters, this ignores granularity and reads {@code coords} directly. */
	@Override
	public float getWeightLearningRateDecay(int layer, int... coords) {
		return nLearningRateDecays[layer].get(coords);
	}

	@Override
	public float getBiasLearningRateDecay(int layer, int... coords) {
		// No bias learning, hence no bias decay.
		return 0;
	}
	
	/**
	 * Deep copy: layers, weights, bias (when enabled) and all learning-rate and decay
	 * arrays are cloned element-wise; granularity flags and configuration are shared.
	 */
	@Override
	public GridNetHebbianABC clone() {
		NDFloatArray[] layers = cloneArray(this.getLayers());
		NDFloatArray[] weights = cloneArray(this.getWeights());
		NDFloatArray[] bias = getEnableBias() ? cloneArray(this.getBias()) : null;
		
		GridNetHebbianABC clone = new GridNetHebbianABC(
				layers
				, weights
				, bias
				, cloneArray(this.nLearningRates)
				, cloneArray(this.nLearningRateDecays)
				, cloneArray(this.ALearningRates)
				, cloneArray(this.ALearningRateDecays)
				, cloneArray(this.BLearningRates)
				, cloneArray(this.BLearningRateDecays)
				, cloneArray(this.CLearningRates)
				, cloneArray(this.CLearningRateDecays)
				, this.getActivationFunction()
				, this.getMaxDimensions()
				, this.getCyclesPerStep()
				, getEnableBias()
				, this.getName() // String is immutable; no copy needed
				, lrGranularity
				, lrDecayGranularity
				, useDecay
		);
		
		return clone;
	}

	/** Element-wise clone of an NDFloatArray array; null-safe (decay arrays may be null). */
	private static NDFloatArray[] cloneArray(NDFloatArray[] src) {
		if (src == null) return null;
		NDFloatArray[] copy = new NDFloatArray[src.length];
		for (int i = 0; i < src.length; i++) copy[i] = src[i].clone();
		return copy;
	}

	/** No bias learning, hence no bias decays. */
	@Override
	public NDFloatArray[] getBiasLearningRateDecays() {
		return null;
	}
}
