package backproped;
import gui.Filesystem;
import gui.NeuralNetworkSet;

import java.io.FileReader;
import java.io.FileWriter;
import java.util.ArrayList;


public class ContinuousNeuronLayerClassifier {
	private ArrayList<ContinuousPerceptron> neurons;
	private double learningRate;
	private ContinuousNeuronLayerClassifier previousLayer;
	private ContinuousNeuronLayerClassifier nextLayer;
	
	//1 weight per input per perceptron 
	double[][] lastWeight;
	//1 output per perceptron 
	private double[] lastOutput;
	//the input is given to all perceptrons, only one copy
	private double[] lastInput;
	
	
	/*
	 * Build a layer of numOfNeurons perceptrons, seeding each one with its
	 * row of the supplied weight matrix and the shared learning rate c.
	 * Note: the weight matrix is kept by reference, not copied.
	 */
	public ContinuousNeuronLayerClassifier(int numOfNeurons, double[][] weights, double c) {
		lastWeight = weights;
		learningRate = c;
		neurons = new ArrayList<ContinuousPerceptron>(numOfNeurons);
		for (int n = 0; n < numOfNeurons; n++) {
			neurons.add(new ContinuousPerceptron(weights[n], learningRate));
		}
	}

	/*
	 * Connect this layer to the previous layer, i.e. the one that feeds it
	 * its input.
	 */
	public void setPreviousLayer(ContinuousNeuronLayerClassifier cnl) {
		this.previousLayer = cnl;
	}
	
	/*
	 * Connect this layer to the next layer, i.e. the one it feeds into.
	 */
	public void setNextLayer(ContinuousNeuronLayerClassifier cnl) {
		this.nextLayer = cnl;
	}
	
	/*
	 * The weight matrix for this layer, one row per neuron.
	 * Note: this returns the internal matrix itself, not a copy.
	 */
	public double[][] getLastWeights() {
		return this.lastWeight;
	}
	
	/*
	 * Number of neurons in this layer.
	 */
	public int getNumOfNeurons() {
		return this.neurons.size();
	}
	
	/*
	 * Feed the input forward through this layer and every following layer,
	 * then pass each final-layer output through the TLU so the result is a
	 * vector of +1/-1 classifications.
	 *
	 * Used for validation and testing; no weights are updated.
	 *
	 * The raw forward pass is identical to testOutput() — including the bias
	 * augmentation on the output layer — so we delegate to it and only apply
	 * the TLU here. (This resolves the old TODO about combining the two.)
	 */
	public double[] classifyInput(double[] input) {
		//raw forward pass, no thresholding
		double[] output = testOutput(input);
		//threshold the final layer's outputs into +1/-1 classifications
		for (int i = 0; i < output.length; i++) {
			output[i] = this.TLU(output[i]);
		}
		return output;
	}
	/*
	 * Run the forward pass through this layer and every following layer and
	 * return the raw (un-thresholded) outputs of the final layer.
	 * Used for validation and testing; no weights are updated.
	 */
	public double[] testOutput(double[] input) {
		//the output layer of a multi-layer net expects a trailing -1 bias entry
		if (nextLayer == null && previousLayer != null) {
			double[] augmented = new double[input.length + 1];
			System.arraycopy(input, 0, augmented, 0, input.length);
			augmented[augmented.length - 1] = -1;
			input = augmented;
		}
		//each perceptron contributes exactly one output
		double[] output = new double[neurons.size()];
		for (int n = 0; n < neurons.size(); n++) {
			output[n] = neurons.get(n).calcOutput(input);
		}
		//recurse into the next layer if there is one
		return (nextLayer == null) ? output : nextLayer.testOutput(output);
	}
	
	
	/*
	 * Train this layer (and, recursively, every following layer) on a single
	 * input/desired pair using feed-forward back-propagation with the Delta
	 * Learning Rule.
	 *
	 * Returns the ERROR SIGNAL of the deepest (output) layer, which the
	 * recursion needs so that hidden layers can weight it against the output
	 * layer's weight matrix in updateWeightings().
	 */
	public double[] trainLayer(double[] input, double[] desired) {
		double[] output = new double[neurons.size()];
		int i = 0;
		//account for the need for bias inputs: only the output layer of a
		//multi-layer net augments its input with a trailing -1 bias entry
		if (nextLayer == null && previousLayer != null) {
			double[] temp = new double[input.length+1];
			//copy over the original input 
			for (i = 0; i < input.length; i++) {
				temp[i] = input[i];
			}
			//add the bias 
			temp[temp.length-1] = -1;
			input = temp;
			i = 0;
		} 
		//forward pass: one output per perceptron
		for (ContinuousPerceptron cp : neurons) {
			output[i] = cp.calcOutput(input);	
			i++;
		}
		
		//remember this pass's activations; updateWeightings() reads them
		lastInput = input;
		lastOutput = output;
		
		double[] errorSignal = null;
		//if there is a next layer, propagate to it
		if (nextLayer != null) {
			errorSignal = nextLayer.trainLayer(output, desired);
			//and then update our weights
			//NOTE(review): by this point nextLayer has already updated its own
			//weights, so updateWeightings() back-propagates through the NEW
			//output-layer weights rather than the ones used on this forward
			//pass — confirm that is intended
			this.updateWeightings(errorSignal);
		} //else if we are on the final layer 
		else {
			errorSignal = calculateErrorSignal(output, desired);
			this.updateWeightings(errorSignal);
			//scale the error signal by the activation derivative
			//f'(z) = 0.5*(1 - z^2) before handing it back to the hidden layer
			for (i = 0; i < output.length; i++) {
				errorSignal[i] = errorSignal[i] * (0.5*(1-Math.pow(output[i],2)));
			}
		}
		
		return errorSignal;
	}
	
	/*
	 * Error signal: the element-wise difference between the desired output
	 * and the actual output (d - z).
	 */
	private double[] calculateErrorSignal(double[] output, double[] desired) {
		int n = output.length;
		double[] errorSignal = new double[n];
		for (int idx = 0; idx < n; idx++) {
			errorSignal[idx] = desired[idx] - output[idx];
		}
		return errorSignal;
	}
	
	/*
	 * Change the weights of every neuron on this layer according to the
	 * delta learning rule, reading the activations stored by trainLayer()
	 * (lastInput / lastOutput).
	 *
	 * Hidden layers and the output layer update slightly differently:
	 * hidden layers first back-propagate the error signal through the next
	 * layer's weights, the output layer uses its own error signal directly.
	 */
	private void updateWeightings(double[] errorSignal) {
		double[][] newWeighting = new double[neurons.size()][lastWeight[0].length];
		int i = 0;

		if (nextLayer != null) {
			//we know that we're a hidden layer if we have a next layer defined
			//modified weights will be
			//w(ji)* = w(ji) + n*(0.5*(1 - y(i)^2)*x(i)*(error sig'*weighting)
			//don't worry about bias neurons for the moment
			
			//grab the weights between the hidden layer and the output layer
			double[][] nextLayerWeights = nextLayer.getLastWeights();
			int numberOfOutputs = nextLayer.getNumOfNeurons();
			for (ContinuousPerceptron cp : neurons) {
				//back-propagated error for neuron i: sum over the output
				//neurons of (error signal component * connecting weight)
				double errorSignalSum = 0;
				for (int k = 0; k < numberOfOutputs; k++) {
					errorSignalSum += errorSignal[k] * nextLayerWeights[k][i];
				}
				
				for (int j = 0; j < lastWeight[0].length; j++) {
					newWeighting[i][j] = lastWeight[i][j] + learningRate*(0.5*(1-Math.pow(lastOutput[i],2))*lastInput[j]*errorSignalSum);
				}
				cp.setWeights(newWeighting[i]);
				lastWeight[i] = newWeighting[i];
				i++;
			}
		} else {
			//output layer: regular delta learning rule, per neuron i
			//w* = w + n*(d(i)-z(i))*0.5*(1-z(i)^2)*y(j)
			//BUG FIX: previously ALL components of the error signal were summed
			//into one value that was applied to every neuron; the delta rule
			//(as stated in the formula above) trains each neuron on its OWN
			//error (d(i) - z(i)). The two only coincide when the layer has a
			//single output neuron, which is why the 1-output tests still passed.
			for (ContinuousPerceptron cp : neurons) {
				for (int j = 0; j < lastWeight[0].length; j++) {
					newWeighting[i][j] = lastWeight[i][j] + learningRate * errorSignal[i] * 0.5 * (1 - Math.pow(lastOutput[i],2))* lastInput[j];
				}
				cp.setWeights(newWeighting[i]);
				lastWeight[i] = newWeighting[i];
				i++;
			}
		}
	}
	
	/*
	 * Threshold Logic Unit: maps any strictly positive activation to +1 and
	 * everything else (including exactly 0) to -1.
	 * Note: Math.signum is NOT a drop-in replacement — signum(0) is 0.
	 */
	public int TLU(double input) {
		return (input > 0) ? 1 : -1;
	}
		
	/*
	 * Build a random weight vector of the given size. Entries alternate in
	 * sign: even indices get a value in [0, 1), odd indices a value in
	 * [-1, 0). (The previous comment claiming all weights lie between 0 and
	 * 1 was inaccurate.)
	 */
	public static double[] generateRandomWeights(int num) {
		double[] weights = new double[num];
		for (int i = 0; i < num; i++) {
			weights[i] = (i % 2 == 0) ? Math.random() : -1 + Math.random();
		}
		return weights;
	}
	/*
	 * Build a fresh 2 layer network (one hidden layer feeding one output
	 * layer) with randomly generated weights.
	 *
	 * numOfInput  - width of each input vector (one hidden weight per component)
	 * numOfOutput - number of output neurons (= number of desired values)
	 * numOfHidden - number of hidden neurons
	 *
	 * Both layers use a learning constant of 0.8. Returns the hidden
	 * (entry) layer.
	 */
	public static ContinuousNeuronLayerClassifier create2LayerNetwork(int numOfInput, int numOfOutput, int numOfHidden) {
		//hidden layer: one weight per raw input component
		double[][] hiddenWeights = new double[numOfHidden][];
		for (int n = 0; n < numOfHidden; n++) {
			hiddenWeights[n] = generateRandomWeights(numOfInput);
		}
		ContinuousNeuronLayerClassifier hiddenLayer = new ContinuousNeuronLayerClassifier(numOfHidden, hiddenWeights, 0.8);

		//output layer: one weight per hidden neuron plus one for the bias input
		double[][] outputWeights = new double[numOfOutput][];
		for (int n = 0; n < numOfOutput; n++) {
			outputWeights[n] = generateRandomWeights(numOfHidden + 1);
		}
		ContinuousNeuronLayerClassifier outputLayer = new ContinuousNeuronLayerClassifier(numOfOutput, outputWeights, 0.8);

		//link the two layers together
		hiddenLayer.setNextLayer(outputLayer);
		outputLayer.setPreviousLayer(hiddenLayer);
		return hiddenLayer;
	}
	
	/*
	 * Build a 2 layer network from previously saved weight matrices (one row
	 * per neuron). Both layers use a learning constant of 0.2. Returns the
	 * hidden (entry) layer.
	 *
	 * TODO: Generalise this out
	 */
	public static ContinuousNeuronLayerClassifier load2LayerNetwork(double[][] hiddenLayerWeights, double[][] outputLayerWeights) {
		ContinuousNeuronLayerClassifier hidden = new ContinuousNeuronLayerClassifier(hiddenLayerWeights.length, hiddenLayerWeights, 0.2);
		ContinuousNeuronLayerClassifier output = new ContinuousNeuronLayerClassifier(outputLayerWeights.length, outputLayerWeights, 0.2);
		hidden.setNextLayer(output);
		output.setPreviousLayer(hidden);
		return hidden;
	}
	
	/*
	 * Load saved weights from a pair of files sharing one naming pattern:
	 * 					filename-output.txt and filename-hidden.txt
	 * The filename argument is the -output.txt file; the -hidden.txt name is
	 * derived from it. Builds a 2 layer network from the loaded weights.
	 *
	 * NOTE: on a read/parse failure this prints the exception and then (as
	 * before) passes null matrices on to load2LayerNetwork.
	 */
	public static ContinuousNeuronLayerClassifier loadWeights(String filename) {
		double[][] hiddenLayerWeights = null;
		double[][] outputLayerWeights = null;
		try {
			outputLayerWeights = readWeightFile(filename);
			//swap the -output suffix for -hidden to find the second file
			String hiddenFilename = filename.substring(0, filename.lastIndexOf('-')) + "-hidden.txt";
			hiddenLayerWeights = readWeightFile(hiddenFilename);
		} catch (Exception e) {
			System.out.println(e);
		}
		return load2LayerNetwork(hiddenLayerWeights, outputLayerWeights);
	}

	/*
	 * Parse one weight file into a matrix. The format is the one written by
	 * saveWeights(): comma-terminated values, one matrix row per line.
	 * (This replaces the previous copy-pasted parsing loops; the reader is
	 * now always closed, even if parsing throws, and a StringBuilder is used
	 * instead of repeated String concatenation.)
	 */
	private static double[][] readWeightFile(String filename) throws Exception {
		ArrayList<ArrayList<Double>> rows = new ArrayList<ArrayList<Double>>();
		ArrayList<Double> row = new ArrayList<Double>();
		StringBuilder token = new StringBuilder();
		FileReader reader = new FileReader(filename);
		try {
			int read;
			while ((read = reader.read()) != -1) {
				if (read == (int)(',')) {
					//a comma terminates one weight value
					row.add(Double.valueOf(token.toString()));
					token.setLength(0);
				} else if (read == (int)('\n')) {
					//a newline terminates one matrix row (any stray chars
					//before it, e.g. '\r', are discarded as before)
					rows.add(row);
					row = new ArrayList<Double>();
					token.setLength(0);
				} else {
					token.append((char) read);
				}
			}
		} finally {
			reader.close();
		}
		//translate from the array lists to a rectangular matrix
		double[][] matrix = new double[rows.size()][rows.get(0).size()];
		for (int i = 0; i < rows.size(); i++) {
			for (int j = 0; j < rows.get(0).size(); j++) {
				matrix[i][j] = rows.get(i).get(j);
			}
		}
		return matrix;
	}
	
	//TODO(Peter): with the inputs being 1 or -1 the format could be even more
	//simplistic; we might want a common naming scheme so that testing and
	//validation sets can be reloaded for training constantly
	/*
	 * Save both the hidden (this) and output (next) layer weights to files
	 * named nnWeights-<suffix>-hidden.txt / nnWeights-<suffix>-output.txt;
	 * if suffix is null the current time in millis is used instead.
	 *
	 * Format: comma separated weights, one matrix row per line (the format
	 * read back by loadWeights()).
	 *
	 * Must be called on the hidden (entry) layer of a 2 layer network: the
	 * output weights are taken from this.nextLayer.
	 *
	 * Returns true only if both files were written successfully.
	 */
	public boolean saveWeights(String suffix) {
		//the output layer weights live on the next layer; ours are the hidden ones
		double[][] outputLayerWeights = this.nextLayer.getLastWeights();

		//write to a file, generate the name auto-magically
		String filename = "c:\\nnWeights-";
		if (suffix == null)
			filename += String.valueOf(System.currentTimeMillis());
		else
			filename += suffix;

		boolean success = false;
		try {
			writeWeightFile(filename + "-output.txt", outputLayerWeights);
			writeWeightFile(filename + "-hidden.txt", lastWeight);
			success = true;
		} catch (Exception e) {
			System.out.println(e);
		}
		return success;
	}

	/*
	 * Write one weight matrix as comma-terminated values, one row per line.
	 * (Replaces the previous duplicated write loops; the writer is now always
	 * closed, even when a write fails.)
	 */
	private static void writeWeightFile(String filename, double[][] matrix) throws Exception {
		FileWriter writer = new FileWriter(filename);
		try {
			for (double[] weightRow : matrix) {
				for (double weight : weightRow) {
					writer.write(weight + ",");
				}
				writer.write("\n");
			}
		} finally {
			writer.close();
		}
	}
		
	/*
	 * Train the network repeatedly over the whole training set until every
	 * training AND validation input is classified exactly. Returns the
	 * number of training cycles used, or -1 if more than 10000 cycles ran
	 * without converging.
	 */
	public int trainTillClassifed(double[][] trainingInputs, double[][] desired, double[][] validationInputs) {
		int numOfInputs = trainingInputs.length;
		int trainingCycleNum = 0;

		while (true) {
			//one full training pass over every input
			for (int i = 0; i < numOfInputs; i++) {
				this.trainLayer(trainingInputs[i], desired[i]);
			}
			trainingCycleNum++;

			//check whether every input (training and validation) classifies exactly
			boolean allClassified = true;
			for (int i = 0; i < numOfInputs; i++) {
				double[] validationResult = this.classifyInput(validationInputs[i]);
				double[] trainingResult = this.classifyInput(trainingInputs[i]);
				for (int j = 0; j < trainingResult.length; j++) {
					if (desired[i][j] - trainingResult[j] != 0) {
						allClassified = false;
						break;
					}
				}
				for (int j = 0; j < trainingResult.length; j++) {
					if (desired[i][j] - validationResult[j] != 0) {
						allClassified = false;
						break;
					}
				}
			}

			if (allClassified)
				return trainingCycleNum;
			//safety valve so we don't loop for ever
			if (trainingCycleNum > 10000)
				return -1;
		}
	}
	
	/*
	 * Entry point. The commented-out calls are alternative test routines;
	 * uncomment the one you want to run.
	 */
	public static void main(String[] args) {
		//testInputOutput();
		//testTrainerFn();
		//question_3_7_randomWeights();
		firstTest();
		//selectWeights();
	}
	
	/*
	 * Sanity check for saveWeights()/loadWeights(): builds a net from fixed
	 * weights, saves it, reloads it, and runs question_3_7 on both — the two
	 * runs should print identical output.
	 */
	public static void testInputOutput() {
		String fileSuffix = "TEST";
		String savedFilename = "c:\\nnWeights-TEST-output.txt";

		double[][] hiddenWeights = { {-6.9938, 6.6736, 1.5555}, {-4.2812, 3.9127, 3.6233} };
		double[][] outputWeights = { {-0.8568, 0.3998, -1.0702} };

		ContinuousNeuronLayerClassifier original = load2LayerNetwork(hiddenWeights, outputWeights);
		original.saveWeights(fileSuffix);
		ContinuousNeuronLayerClassifier reloaded = loadWeights(savedFilename);

		System.out.println("Save/Load Test Complete");
		System.out.println("Should have identical output on the first net, and the one that had its weights loaded");
		original.question_3_7();
		reloaded.question_3_7();
	}
	/*
	 * Sanity check for trainTillClassifed(): trains the lecture-notes problem
	 * until it validates, then prints the classification of every input.
	 */
	public static void testTrainerFn() {
		double[][] hiddenWeights = { {-6.9938, 6.6736, 1.5555}, {-4.2812, 3.9127, 3.6233} };
		double[][] outputWeights = { {-0.8568, 0.3998, -1.0702} };

		double[][] in = { {0,0,-1}, {0,1,-1}, {1,1,-1}, {1,0,-1} };
		double[][] desired = { {-1},{1},{-1},{1} };
		double[][] z = new double[4][1];

		ContinuousNeuronLayerClassifier neuralNet = load2LayerNetwork(hiddenWeights, outputWeights);
		int numCycles = neuralNet.trainTillClassifed(in, desired, in);
		System.out.println("Trained for " + numCycles + " till validated");

		//prove that every input now classifies correctly
		for (int i = 0; i < 4; i++) {
			System.out.println("Input # " + i);
			z[i] = neuralNet.classifyInput(in[i]);
			System.out.println(z[i][0]);
		}
	}
	/*
	 * Exercise 3.7 from the lecture notes, used for most testing: trains the
	 * network for 260 presentations (65 cycles of the 4 inputs), printing an
	 * accumulated per-cycle error, then prints the classification of each
	 * input.
	 */
	public void question_3_7() {
		//input has been augmented (the trailing -1 is the bias entry)
		double[][] in = new double[][] { {0,0,-1}, {0,1,-1}, {1,1,-1}, {1,0,-1}};
		double[][] desired = new double[][] { {-1},{1},{-1},{1}};
		double[][] z = new double[4][1];
		double[] cycleError = new double[250];
		int cycleNum = 0;
		for (int i = 0; i < 260; i++) {
			//NOTE(review): trainLayer returns the ERROR SIGNAL, not the network
			//output, so cycleError below accumulates desired - errorSignal
			//rather than desired - output — confirm that is what was intended
			z[i%4] = this.trainLayer(in[i%4], desired[i%4]);
			//accumulate the error for one full cycle (skip the very first pass)
			if (i%4 == 0 && i != 0) {
				for (int j = 0; j < 4; j++) {
					cycleError[cycleNum] += desired[j][0] - z[j][0]; 
				}
				cycleNum++;
			}
		}
		//dump the per-cycle error as a comma separated line
		for (double error : cycleError) {
			System.out.print(error+",");
		}
		System.out.println();
		
		//see if it can correctly classify them
		for (int i = 0; i < 4; i++) {
			System.out.println("Input # " + i);
			z[i] = this.classifyInput(in[i]);
			System.out.println(z[i][0]);
		}
	}
	
	/*
	 * The same lecture-notes problem, this time starting from random
	 * weights. Should converge eventually.
	 */
	public static void question_3_7_randomWeights() {
		double[][] in = { {0,0,-1}, {0,1,-1}, {1,1,-1}, {1,0,-1} };
		double[][] desired = { {-1},{1},{-1},{1} };
		double[][] z = new double[4][1];

		//2 hidden neurons and 1 output neuron, 3 weights each (incl. bias)
		double[][] hiddenWeights = { generateRandomWeights(3), generateRandomWeights(3) };
		double[][] outputWeights = { generateRandomWeights(3) };

		ContinuousNeuronLayerClassifier neuralNet = load2LayerNetwork(hiddenWeights, outputWeights);
		int numCycles = neuralNet.trainTillClassifed(in, desired, in);
		System.out.println("Trained for " + numCycles + " till validated");

		//prove that every input now classifies correctly
		for (int i = 0; i < 4; i++) {
			System.out.println("Input # " + i);
			z[i] = neuralNet.classifyInput(in[i]);
			System.out.println(z[i][0]);
		}
	}
	
	/*
	 * Loads the digit training and validation sets from c:\nninputs\ (files
	 * nn-training-<k>.txt / nn-validation-<k>.txt), builds one-hot (+1/-1)
	 * desired outputs, trains a fresh 2 layer network for 100000
	 * presentations and prints how each training input then classifies.
	 *
	 * (The two previously copy-pasted loading loops are now shared via
	 * readPatternFile(); the printed output is unchanged.)
	 */
	public static void firstTest() {
		double[][] trainingInputs = new double[10][20*16];
		double[][] desiredOutputs = new double[10][10];
		double[][] validationInputs = new double[10][20*16];
		//per-digit pixel sums, printed as a quick load sanity check
		double[] sum = new double[10];

		//desired output for digit i is +1 at position i and -1 elsewhere
		for (int i = 0; i < desiredOutputs.length; i++) {
			for (int j = 0; j < desiredOutputs[0].length; j++) {
				desiredOutputs[i][j] = (i == j) ? 1 : -1;
			}
		}

		for (int k = 0; k < 10; k++) {
			sum[k] = 0;
			trainingInputs[k] = readPatternFile("c:\\nninputs\\nn-training-"+k+".txt", sum, k);
		}
		System.out.println("Training inputs loaded");
		printSums(sum);

		for (int k = 0; k < 10; k++) {
			sum[k] = 0;
			validationInputs[k] = readPatternFile("c:\\nninputs\\nn-validation-"+k+".txt", sum, k);
		}
		System.out.println("Validation inputs loaded");
		printSums(sum);

		ContinuousNeuronLayerClassifier neuralNet = ContinuousNeuronLayerClassifier.create2LayerNetwork((20*16), 10, 6);
		for (int i = 0; i < 100000; i++) {
			neuralNet.trainLayer(trainingInputs[i%10], desiredOutputs[i%10]);
		}

		double[][] z = new double[10][10];
		for (int i = 0; i < 10; i++) {
			System.out.println("Input #" + i);
			z[i] = neuralNet.classifyInput(trainingInputs[i]);
			for (double out : z[i]) {
				System.out.print(out+",");
			}
			System.out.println();
		}
	}

	/*
	 * Read one input pattern from the given file, flattening the pixel grid
	 * into a single 20*16 vector and accumulating the pixel sum into sum[k].
	 */
	private static double[] readPatternFile(String path, double[] sum, int k) {
		NeuralNetworkSet data = Filesystem.readData(path);
		double[] input = new double[20*16];
		int i = 0;
		for (int[] row : data.getInput()) {
			for (int pixel : row) {
				input[i] = (double) pixel;
				sum[k] += pixel;
				i++;
			}
		}
		return input;
	}

	/*
	 * Print the per-digit sums as a comma separated line.
	 */
	private static void printSums(double[] sum) {
		for (double o : sum) {
			System.out.print(o+",");
		}
		System.out.println();
	}
	
	/*
	 * Repeatedly creates fresh random 2 layer networks and trains each one
	 * until it validates or times out; the weights of the first net that
	 * converges within the cycle limit are saved, and the loop then stops.
	 */
	public static void selectWeights() {
		double[][] trainingInputs = new double[10][20*16];
		double[][] desiredOutputs = new double[10][10];
		double[][] validationInputs = new double[10][20*16];
		//files live in a fixed dir: nn-training-<k>.txt / nn-validation-<k>.txt

		//desired output for digit i is +1 at position i and -1 elsewhere
		for (int i = 0; i < desiredOutputs.length; i++) {
			for (int j = 0; j < desiredOutputs[0].length; j++) {
				desiredOutputs[i][j] = (i == j) ? 1 : -1;
			}
		}

		for (int k = 0; k < 10; k++) {
			NeuralNetworkSet data = Filesystem.readData("c:\\nninputs\\nn-training-"+k+".txt");
			double[] pattern = new double[20*16];
			int idx = 0;
			for (int[] row : data.getInput()) {
				for (int pixel : row) {
					pattern[idx++] = pixel;
				}
			}
			trainingInputs[k] = pattern;
		}

		for (int k = 0; k < 10; k++) {
			NeuralNetworkSet data = Filesystem.readData("c:\\nninputs\\nn-validation-"+k+".txt");
			double[] pattern = new double[20*16];
			int idx = 0;
			for (int[] row : data.getInput()) {
				for (int pixel : row) {
					pattern[idx++] = pixel;
				}
			}
			validationInputs[k] = pattern;
		}

		ContinuousNeuronLayerClassifier neuralNet = null;
		int numCycles = -1;
		int numNetworks = 0;
		while (numCycles == -1) {
			neuralNet = ContinuousNeuronLayerClassifier.create2LayerNetwork((20*16), 10, 6);
			numCycles = neuralNet.trainTillClassifed(trainingInputs, desiredOutputs, validationInputs);
			System.out.println("Net #"+numNetworks+" gave numCycles="+numCycles);
			//only a net that converged is worth keeping
			if (numCycles > -1) {
				neuralNet.saveWeights(null);
			}
			numNetworks++;
		}
	}
	
}
