package mlp;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

import utils.RandomGenerator;

/**
 * A neural network which enables trimming edges
 */
public class SparseMlp extends Mlp implements Serializable {

	private static final long serialVersionUID = -2698437166314291539L;

	/**
	 * Per-layer edge switch: element c is a matrix where entry [i][j] tells
	 * whether the j'th input edge of neuron i in layer c participates in
	 * evaluation of gradients and weight updates. Each matrix has one extra
	 * column for the bias edge, which is always kept on.
	 */
	private List<boolean[][]> _edgeSwitch;

	public static int INPUTS = 50;
	public static int HIDDEN_LAYYER = 50;
	public static int OUTPUT_LAYYER = 1; // number of classes

	private static final int BATCH_AMOUNT = 1; // number of examples in each batch

	// should be initialized according to relevant value
	public static int NUMBER_EXAMPLE = 0;
	public static float ERROR_RATE = 0.15f;
	public static float LEARNING_RATE = 0.01f;
	public static float MOMENTUM = 0.0f;
	public static float INIT_WEIGHT_RANGE = 0.125f; // initialize random weights in range [-INIT_WEIGHT_RANGE,+INIT_WEIGHT_RANGE]

	private ArrayList<Layer> _layers;
	private ArrayList<float[][]> _delta_w;      // weight updates accumulated over the current batch
	private ArrayList<float[][]> _prev_delta_w; // previous batch's updates, used for momentum
	private ArrayList<float[]> _grad_ex;        // per-neuron error gradients of the last evaluated example

	public static final int POSITIVE = 1; // positive evaluation output for an output unit
	public static final int NEGATIVE = -1; // negative evaluation output for an output unit

	private float _threshold = 0.9f;

	/**
	 * Constructor- initializes layers list
	 * Method is private because callers should create a new instance
	 * using the static factory methods below
	 * @param numLayers how many layers the network has (including output)
	 */
	private SparseMlp(int numLayers) {

		super(numLayers);

		// allocate layers container
		_layers = new ArrayList<Layer>();

		// allocate switch list
		_edgeSwitch = new ArrayList<boolean[][]>();
	}

	/**
	 * Initializes given network's fields according to the generated layers
	 * and sets all switches (including the bias column) to true, i.e. by
	 * default every edge participates in learning.
	 * @param mlp neural network to initialize
	 */
	private static void initData(SparseMlp mlp) {

		int numLayers = mlp._layers.size();

		// allocate delta weights and previous delta weights.
		// Java zero-initializes new float arrays, so no explicit reset loop
		// is needed here.
		mlp._delta_w = new ArrayList<float[][]>();
		mlp._prev_delta_w = new ArrayList<float[][]>();
		for (int i = 0; i < numLayers; ++i) {
			int numNeurons = mlp._layers.get(i).size();
			int weightsPerNeuron = mlp._layers.get(i).getWeights(0).length;
			mlp._delta_w.add(new float[numNeurons][weightsPerNeuron]);
			mlp._prev_delta_w.add(new float[numNeurons][weightsPerNeuron]);
		}

		// initialize gradients
		mlp._grad_ex = new ArrayList<float[]>();
		for (int i = 0; i < numLayers; ++i) {
			mlp._grad_ex.add(new float[mlp._layers.get(i).size()]);
		}

		// set all edge switches as true.
		// NOTE: the bias column (index numInputWeights) is set true as well;
		// leaving it false would permanently freeze all bias weights, since
		// updateWeights() skips edges whose switch is off.
		for (int currLayer = 0; currLayer < numLayers; ++currLayer) { // for each layer

			int numNeurons = mlp._layers.get(currLayer).size();
			int numInputWeights = (currLayer == 0) ? INPUTS : mlp._layers.get(currLayer - 1).size();
			boolean[][] currLayerSwitch = new boolean[numNeurons][numInputWeights + 1]; // include bias
			for (int currNeuron = 0; currNeuron < numNeurons; ++currNeuron) { // for each neuron
				for (int currWeight = 0; currWeight <= numInputWeights; ++currWeight) { // input edges + bias
					currLayerSwitch[currNeuron][currWeight] = true;
				}
			}

			// add current layer's switch
			mlp._edgeSwitch.add(currLayerSwitch);
		}
	}

	/**
	 * Creates a new neural network according to given weights
	 * @param weights list where i'th element is weights for i'th layer
	 * @return generated network
	 */
	public static SparseMlp createByWeights(ArrayList<ArrayList<float[]>> weights) {

		// initialize a network
		int numLayers = weights.size();
		SparseMlp mlp = new SparseMlp(numLayers);

		// create the required layers
		mlp._layers = new ArrayList<Layer>();
		for (int i = 0; i < numLayers; ++i) {
			mlp._layers.add(new Layer(weights.get(i)));
		}

		// initialize network's data
		initData(mlp);

		// return the built network
		return mlp;
	}

	/**
	 * Creates a new neural network according to given layers
	 * @param layers layers list where i'th element is network's i'th layer
	 * @return generated network
	 */
	public static SparseMlp createByLayers(ArrayList<Layer> layers) {

		// initialize a network
		int numLayers = layers.size();
		SparseMlp mlp = new SparseMlp(numLayers);

		// set its layers
		mlp._layers = layers;

		// initialize network's data
		initData(mlp);

		// return the built network
		return mlp;
	}

	/**
	 * Creates a new neural network according to specified topology.
	 * All weights are chosen randomly
	 * @param nn_neurons topology- a list where i'th element is number
	 * of neurons in i'th layer
	 * @return generated network
	 */
	public static SparseMlp createsByLayersSize(int nn_neurons[]) {

		// initialize a network
		int numLayers = nn_neurons.length;
		SparseMlp mlp = new SparseMlp(numLayers);

		// create the required layers
		mlp._layers = new ArrayList<Layer>();

		// set first hidden layer, fed by the network's input
		mlp._layers.add(new Layer(INPUTS, nn_neurons[0]));

		// set all other layers
		for (int i = 1; i < numLayers; ++i) {
			mlp._layers.add(new Layer(nn_neurons[i - 1], nn_neurons[i]));
		}

		// initialize network's data
		initData(mlp);

		// return the built network
		return mlp;
	}

	/**
	 * Sets the decision threshold used by binary classification
	 * @param threshold new threshold value
	 */
	public void setThreshold(float threshold) {
		this._threshold = threshold;
	}

	/**
	 * Sets which edge is to be considered (turned on) and
	 * which is not (turned off). The internal switch state is updated so
	 * that gradient evaluation and weight updates skip turned-off edges.
	 * @param newSwitch the new edge switch to set.
	 * each matrix (= list element) represents a layer, where
	 * [i,j]'th entry indicates the switch value for j'th input
	 * edge of the i'th neuron
	 */
	public void setSwitch(List<boolean[][]> newSwitch) {

		// update switch for each input edge
		for (int currLayer = 0; currLayer < _layers.size(); ++currLayer) { // for each layer

			// extract data for current layer
			boolean[][] currLayerSwitch = _edgeSwitch.get(currLayer);
			boolean[][] newLayerSwitch = newSwitch.get(currLayer);
			int numNeurons = _layers.get(currLayer).size();
			int numInputWeights = (currLayer == 0) ? INPUTS : _layers.get(currLayer - 1).size();

			// update each edge (the bias column is never touched and stays on)
			for (int currNeuron = 0; currNeuron < numNeurons; ++currNeuron) { // for each neuron
				for (int currWeight = 0; currWeight < numInputWeights; ++currWeight) { // for each input edge

					boolean turnOn = newLayerSwitch[currNeuron][currWeight];

					// if turning on an edge that was off, pick a random weight for it
					if (turnOn && !currLayerSwitch[currNeuron][currWeight]) {

						_layers.get(currLayer).setWeight(currNeuron, currWeight,
								RandomGenerator.nextFloat(INIT_WEIGHT_RANGE));

					} else if (!turnOn) { // turn an edge off
						_layers.get(currLayer).setWeight(currNeuron, currWeight, 0);
					}

					// record the new switch state; without this the learning
					// code would keep updating weights of pruned edges
					currLayerSwitch[currNeuron][currWeight] = turnOn;
				}
			}
		}
	}

	/**
	 * Sets which edge is to be considered (turned on) and
	 * which is not (turned off)
	 * @param newSwitch the new edge switch to set.
	 * the bits set is parsed in the following manner:
	 * for input layer, offset for each neuron is
	 * #neuron * #edge_for_each_neuron + #edge
	 * and for hidden layer, offset for each neuron is
	 * #neurons * #edges of hidden layer + #neuron * #edge_for_each_neuron + #edge
	 */
	public void setSwitch(BitSet newSwitch) {

		// extract switch value for each edge of hidden layer
		boolean[][] hiddenLayerSwitch = new boolean[SparseMlp.HIDDEN_LAYYER][SparseMlp.INPUTS];
		int numInputs = SparseMlp.INPUTS; // number of input edges is number of inputs for the network
		for (int currNeuron = 0; currNeuron < SparseMlp.HIDDEN_LAYYER; ++currNeuron) {
			for (int currEdge = 0; currEdge < SparseMlp.INPUTS; ++currEdge) {

				// offset is #neuron * #edge_for_each_neuron + #edge
				hiddenLayerSwitch[currNeuron][currEdge] = newSwitch.get(currNeuron * numInputs + currEdge);
			}
		}

		// extract switch value for each edge of output layer
		boolean[][] outputLayerSwitch = new boolean[SparseMlp.OUTPUT_LAYYER][SparseMlp.HIDDEN_LAYYER];
		numInputs = SparseMlp.HIDDEN_LAYYER; // number of input edges is number of neurons in hidden layer
		int prevLayerOffset = SparseMlp.HIDDEN_LAYYER * SparseMlp.INPUTS; // offset to skip previous layer's data = #neurons * #edges of hidden layer
		for (int currNeuron = 0; currNeuron < SparseMlp.OUTPUT_LAYYER; ++currNeuron) {
			for (int currEdge = 0; currEdge < SparseMlp.HIDDEN_LAYYER; ++currEdge) {

				outputLayerSwitch[currNeuron][currEdge] = newSwitch.get(prevLayerOffset
						+ currNeuron * numInputs + currEdge);
			}
		}

		// create the switch data for all layers
		List<boolean[][]> switchData = new ArrayList<boolean[][]>(2);
		switchData.add(hiddenLayerSwitch);
		switchData.add(outputLayerSwitch);

		// update switch for each edge
		setSwitch(switchData);
	}

	/**
	 * Classifies given example according to neural network's result
	 * @param inputs example to classify
	 * @return example's classification: for a single output unit, POSITIVE or
	 * NEGATIVE; otherwise the index of the strongest output unit
	 */
	public int getClassification(float[] inputs) {

		// check if classification is binary
		if (OUTPUT_LAYYER == 1) {
			return getBinaryClassification(inputs);
		}

		// propagate the inputs through the whole network
		float[] outputs = inputs;
		for (int i = 0; i < _layers.size(); ++i) {
			outputs = _layers.get(i).evaluate(outputs);
		}

		return getIndexOfMaxVal(outputs);
	}

	/**
	 * Finds the output unit with the largest activation
	 * @param outputs output layer activations
	 * @return index of the maximal value among the first OUTPUT_LAYYER entries
	 */
	public int getIndexOfMaxVal(float[] outputs) {
		int bestIndex = 0;
		float bestOut = -2; // below any possible activation
		for (int i = 0; i < OUTPUT_LAYYER; ++i) {
			if (outputs[i] > bestOut) {
				bestOut = outputs[i];
				bestIndex = i;
			}
		}

		return bestIndex;
	}

	/**
	 * Classifies given example with a single output unit
	 * @param inputs example to classify
	 * @return POSITIVE if the output exceeds the threshold, NEGATIVE otherwise
	 */
	public int getBinaryClassification(float[] inputs) {

		// propagate the inputs through the whole network
		float[] outputs = inputs;
		for (int i = 0; i < _layers.size(); ++i) {
			outputs = _layers.get(i).evaluate(outputs);
		}

		return (outputs[0] > _threshold) ? POSITIVE : NEGATIVE;
	}

	/**
	 * Runs given inputs through the network.
	 * Each neuron stores its output
	 * @param inputs training example
	 * @return outputs of the output layer
	 */
	public float[] evaluate(float[] inputs) {

		// propagate the inputs through all layers and return the final outputs
		float[] outputs = inputs;
		for (int i = 0; i < _layers.size(); ++i) {
			outputs = _layers.get(i).evaluate(outputs);
		}

		return outputs;
	}

	/**
	 * Calculates the squared error between the network's output and the
	 * desired output for a single example
	 * @param nn_output network's actual output
	 * @param desired_output expected output
	 * @return sum of squared differences
	 */
	private float evaluateError(float nn_output[], float desired_output[]) {

		float d[];

		// add bias to desired output if lengths differ (the network's output
		// may carry a bias entry appended by the layers)
		if (desired_output.length != nn_output.length)
			d = Layer.add_bias(desired_output);
		else
			d = desired_output;

		assert (nn_output.length == d.length);

		float e = 0;
		for (int i = 0; i < nn_output.length; ++i) {
			e += (nn_output[i] - d[i]) * (nn_output[i] - d[i]);
		}

		return e;
	}

	/**
	 * Calculates the total quadratic error over the given examples/results sets
	 * @param examples input examples
	 * @param results expected outputs, aligned with examples
	 * @return sum of per-example squared errors
	 */
	public float evaluateQuadraticError(ArrayList<float[]> examples,
			ArrayList<float[]> results) {

		float e = 0;

		int numTests = examples.size();
		for (int i = 0; i < numTests; ++i) {
			e += evaluateError(evaluate(examples.get(i)), results.get(i));
		}

		return e;
	}

	/**
	 * Sets {@code _delta_w} with 0's
	 */
	private void resetWeightsDelta() {

		// reset delta values for each weight
		for (int c = 0; c < _layers.size(); ++c) {
			for (int i = 0; i < _layers.get(c).size(); ++i) {
				float weights[] = _layers.get(c).getWeights(i);
				for (int j = 0; j < weights.length; ++j) {
					_delta_w.get(c)[i][j] = 0;
				}
			}
		}
	}

	/**
	 * Calculates error for each neuron, should be run after the evaluation
	 * @param results correct results of the training example
	 */
	private void evaluateGradients(float[] results) {

		int lastLayer = _layers.size() - 1;

		// evaluate gradient for output layer
		for (int currNeuron = 0; currNeuron < _layers.get(lastLayer).size(); ++currNeuron) {

			// grad_ex <-- (correct_output - actual_output) * activation_derivative:
			_grad_ex.get(lastLayer)[currNeuron] = (results[currNeuron] - _layers
					.get(lastLayer).getOutput(currNeuron))
					* _layers.get(lastLayer)
							.getActivationDerivative(currNeuron);
		}

		// for each neuron in other layers, back-propagate from the next layer
		for (int currLayer = lastLayer - 1; currLayer >= 0; --currLayer) {
			for (int currNeuron = 0; currNeuron < _layers.get(currLayer).size(); ++currNeuron) {

				// evaluate gradient according to next layer's gradients
				float sum = 0;
				for (int k = 0; k < _layers.get(currLayer + 1).size(); ++k) {

					// skip edges (from current neuron to next layer) that are turned off
					if (!_edgeSwitch.get(currLayer + 1)[k][currNeuron]) {
						continue;
					}

					sum += _layers.get(currLayer + 1).getWeight(k, currNeuron)
							* _grad_ex.get(currLayer + 1)[k];
				}
				_grad_ex.get(currLayer)[currNeuron] = _layers.get(currLayer)
						.getActivationDerivative(currNeuron) * sum;
			}
		}
	}

	/**
	 * Calculates weight update for each edge in the network
	 * @param inputs the example that was fed to the network (input of layer 0)
	 */
	private void evaluateWeightsDelta(float[] inputs) {

		// evaluate delta values for each weight of layers above the first
		for (int currLayer = 1; currLayer < _layers.size(); ++currLayer) { // for each layer
			for (int currNeuron = 0; currNeuron < _layers.get(currLayer).size(); ++currNeuron) { // for each neuron

				// get input weights of current neuron
				float weights[] = _layers.get(currLayer).getWeights(currNeuron);

				// for each input edge of current neuron
				for (int currW = 0; currW < weights.length; ++currW) {

					// skip edges that are turned off
					if (!_edgeSwitch.get(currLayer)[currNeuron][currW]) {
						continue;
					}

					/*
					 * calculate weight update for current edge:
					 * Delta_w <-- LR * error * input + M * prev_Delta_w
					 */
					_delta_w.get(currLayer)[currNeuron][currW] += LEARNING_RATE * _grad_ex.get(currLayer)[currNeuron]
							* _layers.get(currLayer - 1).getOutput(currW)
							+ MOMENTUM * _prev_delta_w.get(currLayer)[currNeuron][currW];
				}
			}
		}

		// evaluate delta values for first hidden layer according to given input
		for (int currNeuron = 0; currNeuron < _layers.get(0).size(); ++currNeuron) { // for each neuron

			// get input weights of current neuron
			float weights[] = _layers.get(0).getWeights(currNeuron);

			// for each input edge of current neuron
			for (int currW = 0; currW < weights.length - 1; ++currW) { // read inputs only- no bias

				// skip edges that are turned off
				if (!_edgeSwitch.get(0)[currNeuron][currW]) {
					continue;
				}

				// calculate weight update for current edge
				_delta_w.get(0)[currNeuron][currW] += LEARNING_RATE
						* _grad_ex.get(0)[currNeuron] * inputs[currW]
						+ MOMENTUM * _prev_delta_w.get(0)[currNeuron][currW];
			}
			// calculate update for bias
			_delta_w.get(0)[currNeuron][weights.length - 1] += LEARNING_RATE
					* _grad_ex.get(0)[currNeuron] * Neuron.BIAS + MOMENTUM
					* _prev_delta_w.get(0)[currNeuron][weights.length - 1];
		}

		// update previous value of weights delta
		updatePrevWeightsDelta();
	}

	/**
	 * Assigns previous-weights-delta's values those of the current one
	 */
	private void updatePrevWeightsDelta() {

		for (int c = 0; c < _layers.size(); ++c) {
			for (int i = 0; i < _layers.get(c).size(); ++i) {
				float weights[] = _layers.get(c).getWeights(i);
				for (int j = 0; j < weights.length; ++j) {
					_prev_delta_w.get(c)[i][j] = _delta_w.get(c)[i][j];
				}
			}
		}
	}

	/**
	 * Updates each edge's weight according to back propagation:
	 * w' <-- w + Delta_w
	 * Turned-off edges are skipped.
	 */
	private void updateWeights() {
		// for each layer
		for (int currLayer = 0; currLayer < _layers.size(); ++currLayer) {
			// for each neuron
			for (int currNeuron = 0; currNeuron < _layers.get(currLayer).size(); ++currNeuron) {

				// for each input edge that is turned on
				float weights[] = _layers.get(currLayer).getWeights(currNeuron);
				for (int currW = 0; currW < weights.length; ++currW) {

					// skip edges that are turned off
					if (!_edgeSwitch.get(currLayer)[currNeuron][currW]) {
						continue;
					}

					// update weight (delta already includes momentum)
					_layers.get(currLayer).setWeight(currNeuron, currW,
							_layers.get(currLayer).getWeight(currNeuron, currW)
									+ _delta_w.get(currLayer)[currNeuron][currW]);
				}
			}
		}
	}

	/**
	 * Runs a single pass of back propagation:
	 * for each {@code BATCH_AMOUNT} of training examples,
	 * runs the examples through the network and accumulates the necessary
	 * weight updates.
	 * When a batch is done (or the examples run out), updates the network's
	 * weights.
	 * @param examples training examples
	 * @param results results of training examples
	 */
	private void batchBackPropagation(ArrayList<float[]> examples,
			ArrayList<float[]> results) {

		for (int l = 0; l < examples.size(); ++l) {

			// start of a new batch: clear the accumulated updates
			if (l % BATCH_AMOUNT == 0) {
				resetWeightsDelta();
			}

			evaluate(examples.get(l));
			evaluateGradients(results.get(l));
			evaluateWeightsDelta(examples.get(l));

			// end of a batch, or last example: apply the accumulated updates.
			// (equivalent to the old condition when BATCH_AMOUNT == 1, but
			// correct for larger batches and independent of NUMBER_EXAMPLE)
			if ((l + 1) % BATCH_AMOUNT == 0 || l == examples.size() - 1) {
				updateWeights();
			}
		}
	}

	/**
	 * Trains the network with batched back propagation until the quadratic
	 * error drops below {@code examples.size() * ERROR_RATE} or 450
	 * iterations pass
	 * @param examples training examples
	 * @param results results of training examples
	 * @return the network's quadratic error after training
	 */
	public float learn(ArrayList<float[]> examples, ArrayList<float[]> results) {

		float e = Float.POSITIVE_INFINITY;

		int counter = 0;
		while (e > (float) examples.size() * ERROR_RATE) {

			batchBackPropagation(examples, results);
			e = evaluateQuadraticError(examples, results);
			if (counter % 10 == 0)
				System.out.println("After the iteration " + counter + " the error is " + e);
			if (counter > 450) {
				System.out.println("--- The learning stopped after 450 iterations. (probably local minimum) ---");
				break;
			}
			counter++;
		}
		System.out.println("\n\nLast Error: " + e);
		System.out.println("Learning completed");

		return e;
	}

	/**
	 * Runs back propagation for specified number of epochs
	 * @param examples examples used to train network
	 * @param results results of training examples
	 * @param epochs number of back propagation iterations
	 * @return network's quadratic errors on entire training set after training
	 */
	public float learn(ArrayList<float[]> examples, ArrayList<float[]> results,
			int epochs) {

		// perform back propagation for specified amount of epochs
		for (int i = 0; i < epochs; ++i) {
			batchBackPropagation(examples, results);
		}

		// return the network's error after learning
		return evaluateQuadraticError(examples, results);
	}

	/**
	 * @return number of layers in the network
	 */
	public int getNumLayers() {
		return _layers.size();
	}

	/**
	 * @return the network's topology: i'th entry is the size of the i'th layer
	 */
	public int[] getStructure() {

		int[] structure = new int[_layers.size()];
		for (int i = 0; i < _layers.size(); ++i) {
			structure[i] = _layers.get(i).size();
		}

		return structure;
	}

	/**
	 * Adds given layer at i'th position
	 * @param i where to add the new layer
	 * @param newLayer layer to add
	 * @return false if given layer is meant for output
	 * and its size is not {@code OUTPUT_LAYYER}
	 */
	public boolean addLayer(int i, Layer newLayer) {

		// validate layer size for output
		if (i == _layers.size() - 1) {
			if (newLayer.size() != OUTPUT_LAYYER) {
				return false;
			}
		}

		// set layer
		_layers.add(i, newLayer);
		return true;
	}

	/**
	 * Replaces current i'th layer with given layer
	 * @param i where to set given layer
	 * @param newLayer layer to set
	 */
	public void setLayer(int i, Layer newLayer) {

		_layers.set(i, newLayer);
	}

	/**
	 * Return's i'th layer
	 * @param i index of layer to return
	 * @return i'th layer, if i is invalid returns null
	 */
	public Layer getLayer(int i) {

		// check boundaries (>= rather than >: index == size is out of range)
		if (i < 0 || i >= _layers.size()) {
			return null;
		}

		// return i'th layer
		return _layers.get(i);
	}

	@Override
	public String toString() {

		StringBuilder output = new StringBuilder();

		for (int layer = 0; layer < _layers.size(); ++layer) { // for each layer
			output.append("layer #").append(layer).append(":\n");
			for (int neuron = 0; neuron < _layers.get(layer).size(); ++neuron) { // for each neuron
				output.append("[neuron #").append(neuron).append(":");
				float[] weights = _layers.get(layer).getWeights(neuron);
				for (int edge = 0; edge < weights.length; ++edge) { // for each input edge
					output.append(" ").append(weights[edge]);
				}
				output.append("] ; ");
			}
			output.append("\n");
		}

		return output.toString();
	}
}
