package train.ocr; //multilayer (one hidden layer) neural net framework
//getRawPrediction and train implement the forward pass and backpropagation

public class Perceptron {
	//learning rate applied to every weight update
	private static final double ALPHA = 0.05;
	//initial weights are drawn uniformly from [-NOISEMAX/2, NOISEMAX/2)
	private static final double NOISEMAX = 0.4;
	//training on an example is "done" once |desired - predicted| is below this
	private static final double ERROR_THRESHOLD = 0.1;

	//weights from hidden to output layer; index 0 is the bias weight,
	//index i+1 pairs with hidden node i
	double[] outputweight;
	//weights from input to hidden layer; hiddenweight[h][0] is hidden node h's
	//bias weight, hiddenweight[h][j] pairs with input j-1
	double[][] hiddenweight;

	//hidden-layer activations cached by the most recent getRawPrediction call
	double[] hidden;

	//number of nodes in the input layer and in the hidden layer
	int size;

	//per-node output-layer terms cached by getRawPrediction
	//(kept for compatibility; train does not read them)
	double[] outputs;


	//constructor.  Called with the number of inputs:  new Perceptron(3) makes a
	//perceptron with 3 inputs, 3 hidden nodes, and a single output.
	Perceptron(int size) {
		this.size = size;
		//one weight per hidden node, plus a bias, to the output node
		outputweight = new double[size + 1];
		//one weight per input, plus a bias, to each hidden node
		hiddenweight = new double[size][size + 1];
		for (int i = 0; i < size + 1; i++)
			outputweight[i] = Math.random() * NOISEMAX - NOISEMAX / 2;
		for (int i = 0; i < size; i++)
			for (int j = 0; j < size + 1; j++)
				hiddenweight[i][j] = Math.random() * NOISEMAX - NOISEMAX / 2;
		//create the cache array; getRawPrediction fills it on every call
		hidden = new double[size];
	}

	//returns 1 when the raw prediction is at least 0.5, else 0
	int getPrediction(int[] inputs) {
		return getRawPrediction(inputs) >= 0.5 ? 1 : 0;
	}

	//takes an array of 0/1 inputs, feeds them forward through the network,
	//caches the hidden activations in "hidden", and returns a guess in (0, 1)
	double getRawPrediction(int[] inputs) {
		//1. rescale the 0/1 inputs to -1/+1 so the network sees symmetric inputs
		int[] scaled = new int[inputs.length];
		for (int i = 0; i < inputs.length; i++) {
			scaled[i] = inputs[i] > 0 ? 1 : -1;
		}

		//2. for each hidden node, dot the inputs with its weights (index 0 is
		//the bias, which always sees an input of 1), squash with sigmoid, cache
		for (int i = 0; i < hiddenweight.length; i++) {
			double net = hiddenweight[i][0];
			for (int j = 1; j < hiddenweight[i].length; j++) {
				net += scaled[j - 1] * hiddenweight[i][j];
			}
			hidden[i] = sigmoid(net);
		}

		//3. dot the hidden activations (plus bias) with the output weights and
		//squash the total.  per-node terms are cached in "outputs" for inspection.
		outputs = new double[size + 1];
		outputs[0] = outputweight[0];
		double sum = outputweight[0];
		for (int i = 1; i < outputweight.length; i++) {
			outputs[i] = outputweight[i] * hidden[i - 1];
			sum += outputs[i];
		}
		for (int i = 0; i < outputs.length; i++) {
			outputs[i] = sigmoid(outputs[i]);
		}
		return sigmoid(sum);
	}

	//trains the perceptron on one example: "inputs" are 0/1 pixels, "want" is
	//the desired 0/1 output.  Adjusts all weights by backpropagation and
	//returns TRUE once the output error on this example is within threshold.
	boolean train(int[] inputs, int want) {
		//1. forward pass; fills "hidden" with the activations backprop needs
		double predicted = getRawPrediction(inputs);

		//2. output error:  error = desired - predicted
		double error = want - predicted;

		//3. output training error:  error scaled by the sigmoid derivative
		double oerror = error * predicted * (1 - predicted);

		//4. hidden error for each hidden node, computed BEFORE the output
		//weights are updated (outputweight[i + 1] feeds from hidden node i)
		double[] hiddenErrors = new double[size];
		for (int i = 0; i < size; i++) {
			hiddenErrors[i] = hidden[i] * (1 - hidden[i]) * oerror * outputweight[i + 1];
		}

		//5. update the output weights.  The bias (index 0) sees a constant
		//hidden value of 1; weight i + 1 pairs with hidden node i.
		//(bug fix: the old loop wrote outputweight[i], which double-trained the
		//bias weight and never trained the last hidden-to-output weight)
		outputweight[0] += ALPHA * oerror;
		for (int i = 0; i < size; i++) {
			outputweight[i + 1] += ALPHA * oerror * hidden[i];
		}

		//6.+7. update the input-to-hidden weights.  Index 0 is the bias (input
		//of 1); weight j pairs with input j - 1, rescaled to -1/+1 exactly as
		//in the forward pass.
		for (int i = 0; i < hiddenweight.length; i++) {
			hiddenweight[i][0] += hiddenErrors[i] * ALPHA;
			for (int j = 1; j < hiddenweight[i].length; j++) {
				int input = inputs[j - 1] > 0 ? 1 : -1;
				hiddenweight[i][j] += hiddenErrors[i] * input * ALPHA;
			}
		}

		//8. done when the output error is within the threshold
		return Math.abs(error) <= ERROR_THRESHOLD;
	}

	//the logistic function 1/(1+e^-x): mathematically close to the >=0 hard
	//threshold of a single-layer perceptron, but differentiable.
	//(fix: uses Math.exp instead of Math.pow with a truncated value of e)
	static double sigmoid(double x) {
		return 1.0 / (1.0 + Math.exp(-x));
	}
}