import java.util.ArrayList;
import java.util.List;


/**
 * One layer of a small feed-forward network built from {@code ContinuousPerceptron}s.
 * Layers are chained via setPreviousLayer/setNextLayer; training back-propagates
 * an error signal from the output layer toward the input layer. Neurons use a
 * bipolar activation whose derivative is 0.5*(1 - z^2).
 */
public class ContinuousNeuronLayer {
	private ArrayList<ContinuousPerceptron> neurons;
	private double learningRate;
	private ContinuousNeuronLayer previousLayer;
	private ContinuousNeuronLayer nextLayer;
	
	//1 weight per input per perceptron 
	private double[][] lastWeight;
	//1 output per perceptron 
	private double[] lastOutput;
	//the input is given to all perceptrons, only one copy
	private double[] lastInput;
	
	/**
	 * Builds a layer of {@code numOfNeurons} perceptrons, each seeded with its
	 * own row of {@code weights} and sharing learning rate {@code c}.
	 * Note: keeps a live reference to {@code weights} (no defensive copy).
	 */
	public ContinuousNeuronLayer(int numOfNeurons, double[][] weights, double c) {
		lastWeight = weights;
		learningRate = c;
		neurons = new ArrayList<ContinuousPerceptron>(numOfNeurons);
		for (int i = 0; i < numOfNeurons; i++) {
			neurons.add(new ContinuousPerceptron(weights[i], learningRate));
		}
	}

	public void setPreviousLayer(ContinuousNeuronLayer cnl) {
		previousLayer = cnl;
	}
	
	public void setNextLayer(ContinuousNeuronLayer cnl) {
		nextLayer = cnl;
	}
	
	//NOTE(review): exposes the internal array — callers can mutate layer state.
	public double[][] getLastWeights() {
		return lastWeight;
	}
	
	public int getNumOfNeurons() {
		return neurons.size();
	}
	
	/**
	 * Appends the constant bias input (-1) for any non-input layer.
	 * FIX: the original condition (nextLayer == null && previousLayer != null)
	 * only appended the bias on the FINAL layer, starving hidden layers of a
	 * bias in networks deeper than two layers. Behavior is unchanged for the
	 * two-layer networks exercised in main().
	 */
	private double[] appendBias(double[] input) {
		if (previousLayer == null) {
			//input layer: caller supplies any bias explicitly (see example_3_7)
			return input;
		}
		double[] withBias = new double[input.length + 1];
		System.arraycopy(input, 0, withBias, 0, input.length);
		withBias[withBias.length - 1] = -1;
		return withBias;
	}
	
	//used for validation and testing, won't update the weighting at all
	public double[] testOutput(double[] input) {
		input = appendBias(input);
		double[] output = new double[neurons.size()];
		int i = 0;
		//there should only be one output per cp
		for (ContinuousPerceptron cp : neurons) {
			output[i] = cp.calcOutput(input);
			i++;
		}
		//forward the result through the rest of the network
		if (nextLayer != null) {
			output = nextLayer.testOutput(output);
		}
		return output;
	}
	
	/**
	 * Trains the network on one (input, desired) pair: forward-propagates,
	 * recursively trains downstream layers, then updates this layer's weights.
	 *
	 * @return the error signal for the layer above to back-propagate
	 */
	public double[] trainLayer(double[] input, double[] desired) {
		input = appendBias(input);
		double[] output = new double[neurons.size()];
		//FIX: the original nested loops overwrote EVERY output slot with the
		//value of whichever perceptron happened to be iterated last; each
		//perceptron now contributes exactly one output, matching testOutput().
		int i = 0;
		for (ContinuousPerceptron cp : neurons) {
			output[i] = cp.calcOutput(input);
			i++;
		}
		//remembered for the weight update below
		lastInput = input;
		lastOutput = output;
		
		double[] errorSignal;
		//if there is a next layer, propagate to it
		if (nextLayer != null) {
			errorSignal = nextLayer.trainLayer(output, desired);
			//and then update our weights
			this.updateWeightings(errorSignal);
		} else {
			//we are on the final layer
			errorSignal = calculateErrorSignal(output, desired);
			this.updateWeightings(errorSignal);
			//scale the raw (d - z) error by the activation derivative,
			//f'(z) = 0.5*(1 - z^2), before handing it back up the chain
			for (i = 0; i < output.length; i++) {
				errorSignal[i] = errorSignal[i] * (0.5*(1-Math.pow(output[i],2)));
			}
		}
		
		return errorSignal;
	}
	
	//raw error (d - z) per output neuron
	private double[] calculateErrorSignal(double[] output, double[] desired) {
		double[] errorSignal = new double[output.length];
		for (int i = 0; i < output.length; i++) {
			errorSignal[i] = desired[i] - output[i];
		}
		return errorSignal;
	}
	
	/**
	 * Applies one weight update to every neuron in this layer, using the
	 * error signal handed back from the layer below (or from
	 * calculateErrorSignal on the output layer).
	 */
	private void updateWeightings(double[] errorSignal) {
		double[][] newWeighting = new double[neurons.size()][lastInput.length];
		int i = 0;

		if (nextLayer != null) {
			//hidden layer (a next layer exists):
			//w(ji)* = w(ji) + n * 0.5*(1 - y(i)^2) * x(j) * sum_k(errSig(k) * w(ik))
			//don't worry about bias neurons for the moment
			for (ContinuousPerceptron cp : neurons) {
				//FIX: the back-propagated sum is reset for every neuron; the
				//original accumulated it across all j and all neurons,
				//compounding the error term on every iteration. It is also
				//invariant in j, so it is computed once per neuron.
				double errorSignalSum = 0.0;
				for (int k = 0; k < errorSignal.length; k++) {
					//NOTE(review): indexing THIS layer's lastWeight by the next
					//layer's neuron index k looks wrong — textbook backprop uses
					//the NEXT layer's weight w_next[k][i] here. Kept as-is
					//pending confirmation against the perceptron model.
					errorSignalSum += errorSignal[k] * lastWeight[i][k];
				}
				double delta = 0.5 * (1 - Math.pow(lastOutput[i], 2)) * errorSignalSum;
				for (int j = 0; j < lastInput.length; j++) {
					newWeighting[i][j] = lastWeight[i][j] + learningRate * delta * lastInput[j];
				}
				cp.setWeights(newWeighting[i]);
				lastWeight[i] = newWeighting[i];
				i++;
			}
		} else {
			//output layer: regular delta learning rule
			//w* = w + n*(d-z)*0.5*(1-z^2)*y
			for (ContinuousPerceptron cp : neurons) {
				//FIX: each output neuron uses its OWN error term; the original
				//summed the error over all output neurons (identical for the
				//single-output networks in main(), wrong for wider outputs).
				double delta = errorSignal[i] * 0.5 * (1 - Math.pow(lastOutput[i], 2));
				for (int j = 0; j < lastInput.length; j++) {
					newWeighting[i][j] = lastWeight[i][j] + learningRate * delta * lastInput[j];
				}
				cp.setWeights(newWeighting[i]);
				lastWeight[i] = newWeighting[i];
				i++;
			}
		}
	}
	
	/**
	 * Example 3.4: trains on four 1-D inputs (bias included in the data) until
	 * all four are classified within tolerance, then prints the run count and
	 * the final weights.
	 */
	public void example_3_4() {
		double[][] in = new double[][] { {1,1}, {-0.5, 1}, {3,1}, {-2,1}};
		double[][] desired = new double[][] { {1},{-1},{1},{-1}};
		double[] testOutput;
		double errorSigSum = 0.00;
		boolean continueRunning = true;
		int i = 0;
		while (continueRunning) {
			this.trainLayer(in[i%4], desired[i%4]);
			//now see if we can stop, i.e. all 4 inputs can be classified
			//should really be done with a validation set 
			for (int j = 0; j < 4; j++) {
				testOutput = this.testOutput(in[j]);
				for (int k = 0; k < testOutput.length; k++) {
					//FIX: accumulate the ABSOLUTE error — the signed sum let
					//positive and negative errors cancel (or go negative),
					//stopping the loop long before the network converged.
					errorSigSum += Math.abs(testOutput[k] - desired[j][k]);
					System.out.println("Output was " + testOutput[k]+ " from input "+ in[j][k]);
				}
			}
			if (errorSigSum < 0.054) {
				continueRunning = false;
			} else {
				errorSigSum = 0.0;
			}
			i++;
		}
		System.out.println("# of Runs: " + i);
		for (double[] lw : lastWeight) {
			for (double w: lw) {
				System.out.println(w+",");
			}
			System.out.println();
		}
	}
	
	/**
	 * Example 3.7: XOR-style training set with an explicit bias input in the
	 * data; runs a fixed 1000 training passes.
	 */
	public void example_3_7() {
		double[][] in = new double[][] { {0,0,-1}, {0,1,-1}, {1,1,-1}, {1,0,-1}};
		double[][] desired = new double[][] { {-1},{1},{-1},{1}};
		for (int i = 0; i < 1000; i++) {
			this.trainLayer(in[i%4], desired[i%4]);
		}
	}
	
	/**
	 * Builds the 2-2-1 network from example 3.7, trains it, and dumps the
	 * resulting weights of both layers.
	 */
	public static void main(String[] args) {
		double[][] weights_layer1 = new double[][] { {-6.9938, 6.6736, 1.5555}, {-4.2812, 3.9127, 3.6233}};
		double[][] weights_layer2 = new double[][] { {-0.8568, 0.3998, -1.0702}};
		
		ContinuousNeuronLayer layer1 = new ContinuousNeuronLayer(2,weights_layer1,0.1);		
		ContinuousNeuronLayer layer2 = new ContinuousNeuronLayer(1,weights_layer2,0.1);
		layer1.setNextLayer(layer2);
		layer2.setPreviousLayer(layer1);
		layer1.example_3_7();
		
		//now check the weights
		System.out.println("Weights on layer 1");
		for (double[] weights : layer1.lastWeight) {
			for (double w : weights) {
				System.out.print(Double.toString(w)+",");
			}
			System.out.println();
		}
		
		System.out.println("Weights on layer 2");
		for (double[] weights : layer2.lastWeight) {
			for (double w : weights) {
				System.out.print(w+",");
			}
		}
	}
}
