package competition.cig.cs478.backprop;

import java.io.Serializable;
import java.util.Arrays;

public class Network implements Serializable {
	private static final long serialVersionUID = 6925800425926469475L;

	/** Per-layer activations; a[0] is the input layer, whose last slot is a bias unit held at 1.0. */
	private double[][] a;
	/** Weights; w[l][i][j] links node i of layer l to node j of layer l+1. */
	private double[][][] w;
	/** Most recent weight deltas, reused as the momentum term on the next backprop step. */
	private double[][][] c;

	/**
	 * Initialize a fully-connected feed-forward network of sigmoid units with
	 * {@code ni} input nodes (plus one internal bias node), {@code nhl} hidden
	 * layers of {@code nh} nodes each, and {@code no} output nodes. Weights
	 * start as small random values in (-0.125, 0.125).
	 * 
	 * @param ni number of input nodes, excluding the bias node added internally
	 * @param nh number of nodes per hidden layer
	 * @param nhl number of hidden layers; may be 0
	 * @param no number of output nodes
	 * @throws IllegalArgumentException if any layer size is invalid
	 */
	public Network(int ni, int nh, int nhl, int no) {
		if (ni < 1 || no < 1 || nhl < 0 || (nhl > 0 && nh < 1)) {
			throw new IllegalArgumentException(
					"invalid topology: ni=" + ni + " nh=" + nh + " nhl=" + nhl + " no=" + no);
		}
		int nl = nhl + 2;	// total layer count: input + hidden layers + output
		ni++;				// one extra input slot for the always-1.0 bias node

		// Initialize all activations to 1.0; the bias slot keeps this value forever
		// because update() never writes past the real inputs.
		a = new double[nl][];
		a[0] = new double[ni];
		Arrays.fill(a[0], 1.0);
		for (int j = 0; j < nhl; j++) {
			a[j + 1] = new double[nh];
			Arrays.fill(a[j + 1], 1.0);
		}
		a[nl - 1] = new double[no];
		Arrays.fill(a[nl - 1], 1.0);

		// Initialize weights to small random values; the momentum buffer c relies
		// on Java's default zero-initialization.
		w = new double[nl - 1][][];
		c = new double[nl - 1][][];
		for (int l = 0; l < nl - 1; l++) {
			w[l] = new double[a[l].length][a[l + 1].length];
			c[l] = new double[a[l].length][a[l + 1].length];
			for (int i = 0; i < w[l].length; i++) {
				for (int j = 0; j < w[l][i].length; j++) {
					w[l][i][j] = getWeight();
				}
			}
		}
	}

	/**
	 * Having already run {@link #update(double[])}, backpropagate the error for
	 * the given target output and adjust every weight by gradient descent with
	 * momentum.
	 * 
	 * @param targets desired output activations, one per output node
	 * @param learningRate step size applied to each weight gradient
	 * @param momentum fraction of the previous weight change carried over
	 * @return the squared-error cost 0.5 * sum((target - output)^2)
	 * @throws IllegalArgumentException if targets has the wrong length
	 */
	public double backPropagate(double targets[], double learningRate,
			double momentum) {
		int ol = a.length - 1;
		int nl = a.length;
		if (targets.length != a[ol].length) {
			throw new IllegalArgumentException(
					"expected " + a[ol].length + " targets, got " + targets.length);
		}
		double[][] errors = new double[nl][];

		// Output-layer delta: sigmoid'(out) * (target - out).
		errors[ol] = new double[a[ol].length];
		for (int k = 0; k < a[ol].length; k++) {
			double error = targets[k] - a[ol][k];
			errors[ol][k] = dsigmoid(a[ol][k]) * error;
		}

		// Hidden-layer deltas, propagated backwards through the weights.
		// errors[0] is never needed: input activations have no incoming weights.
		for (int l = nl - 2; l > 0; l--) {
			errors[l] = new double[a[l].length];
			for (int j = 0; j < a[l].length; j++) {
				double error = 0.0;
				for (int k = 0; k < a[l + 1].length; k++) {
					error += errors[l + 1][k] * w[l][j][k];
				}
				errors[l][j] = dsigmoid(a[l][j]) * error;
			}
		}

		// Weight update: gradient step plus momentum from the previous change.
		for (int l = 0; l < nl - 1; l++) {
			for (int j = 0; j < a[l].length; j++) {
				for (int k = 0; k < a[l + 1].length; k++) {
					double change = (learningRate * errors[l + 1][k] * a[l][j])
							+ (momentum * c[l][j][k]);
					w[l][j][k] += change;
					c[l][j][k] = change;
				}
			}
		}

		// Report the squared-error cost for this example.
		double error = 0.0;
		for (int k = 0; k < a[ol].length; k++) {
			double diff = targets[k] - a[ol][k];
			error += 0.5 * diff * diff;
		}

		return error;
	}

	/**
	 * Feed the given inputs forward through the network and return the output
	 * activations. The returned array is the network's internal output buffer;
	 * copy it if it must survive the next call.
	 * 
	 * @param inputs one value per input node (excluding the bias node)
	 * @return the activation of each output node, each in (0, 1)
	 * @throws IllegalArgumentException if inputs has the wrong length
	 */
	public double[] update(double inputs[]) {
		// Strict length check: a longer array would silently overwrite the bias
		// slot, and a shorter one would silently reuse stale activations.
		if (inputs.length != a[0].length - 1) {
			throw new IllegalArgumentException(
					"expected " + (a[0].length - 1) + " inputs, got " + inputs.length);
		}

		// Input activations (bias slot a[0][ni] stays at 1.0).
		for (int i = 0; i < inputs.length; i++) {
			a[0][i] = inputs[i];
		}

		// Forward pass: each node is the sigmoid of its weighted input sum.
		for (int l = 1; l < a.length; l++) {
			for (int j = 0; j < a[l].length; j++) {
				double sum = 0.0;
				for (int i = 0; i < a[l - 1].length; i++) {
					sum += a[l - 1][i] * w[l - 1][i][j];
				}
				a[l][j] = sigmoid(sum);
			}
		}

		return a[a.length - 1];
	}

	/**
	 * Logistic sigmoid: 1 / (1 + e^-x).
	 * 
	 * @param input the weighted input sum
	 * @return a value in (0, 1)
	 */
	private static double sigmoid(double input) {
		return 1.0 / (1.0 + Math.exp(-input));
	}

	/**
	 * Derivative of the sigmoid expressed in terms of its OUTPUT value:
	 * if y = sigmoid(x) then dy/dx = y * (1 - y), so callers pass an activation,
	 * not the raw input sum.
	 * 
	 * @param input an activation value y = sigmoid(x)
	 * @return the derivative y * (1 - y)
	 */
	private static double dsigmoid(double input) {
		return input * (1.0 - input);
	}

	/**
	 * Random initial weight, uniform in (-0.125, 0.125).
	 * 
	 * @return a small random weight
	 */
	private static double getWeight() {
		return (Math.random() - 0.5) / 4;
	}
}
