/**
 * Copyright 2010 Neuroph Project http://neuroph.sourceforge.net
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package neuralnetwork;


import java.util.Arrays;
import java.util.Iterator;

import main.PathLengthNormalisedSmoothing;

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PropertyConfigurator;
import org.neuroph.core.Connection;
import org.neuroph.core.Layer;
import org.neuroph.core.Neuron;
import org.neuroph.core.Weight;
import org.neuroph.core.learning.SupervisedTrainingElement;
import org.neuroph.core.learning.TrainingElement;

import tools.CalculatePathLengthNormalizedSmoothing;
import tools.UtilFunctions;

/**
 * Smoothed learning algorithm ( this is written on top of existing code for momentum backpropagation)
 * 
 * 
 * @author Vivek
 */
public class SmoothingAlgorithm extends ModifiedBackPropagation {

	/** Class logger (log4j). */
	static final Logger logger = Logger.getLogger(SmoothingAlgorithm.class);

	/**
	 * The class fingerprint that is set to indicate serialization compatibility
	 * with a previous version of the class.
	 */
	private static final long serialVersionUID = 1L;

	/** Step size used to probe the effect of increasing/decreasing a weight. */
	double delta;

	/**
	 * Momentum factor
	 */
	protected double momentum = 0.25d;

	/**
	 * Creates a new instance of the smoothed learning rule, sets the probe
	 * step size to 0.001 and loads the log4j configuration.
	 */
	public SmoothingAlgorithm() {
		super();
		delta = .001;
		// loading logging properties
		PropertyConfigurator.configure("Logging Properties/log4j.properties");
		// Pin the log level once here instead of resetting it on every
		// updateNeuronWeights call (the effect is identical: it is set
		// before any training step runs).
		logger.setLevel(Level.INFO);
	}

	/**
	 * Implements the weight update procedure written on top of momentum
	 * backpropagation: the magnitude of the change comes from the usual
	 * learningRate * error * input term plus momentum, while the sign
	 * (direction) is dictated by the smoothness cost returned by
	 * {@link #getCost(double, Weight, double)}.
	 *
	 * @param neuron
	 *            neuron whose input-connection weights are updated
	 */
	@Override
	protected void updateNeuronWeights(Neuron neuron) {

		logger.debug("Entered updateNeuronWeights");

		for (Connection thisConnection : neuron.getInputConnections()) {

			/*
			 * Below block of code is the same as standard momentum
			 * backpropagation.
			 */
			myConnection connection = (myConnection) thisConnection;
			double input = connection.getInput();
			logger.debug("input " + input);

			// get the error for the specified neuron
			double neuronError = neuron.getError();

			Weight weight = connection.getWeight();
			MomentumWeightTrainingData weightTrainingData =
					(MomentumWeightTrainingData) weight.getTrainingData();
			double previousWeightValue = weightTrainingData.previousValue;

			/*
			 * Smoothing extension: getCost returns +1 or -1 depending on which
			 * probe direction yields the smoother output trajectory.
			 */
			double cost = this.getCost(delta, weight, previousWeightValue);
			logger.debug("Direction :" + cost);

			// Momentum backpropagation with cost-sensitive direction.
			double weightChange = cost * Math.abs(this.learningRate * neuronError * input)
					+ momentum * (weight.value - previousWeightValue);

			// The momentum term can flip the effective sign away from the
			// requested direction; flag it. Math.signum avoids the NaN that
			// weightChange / Math.abs(weightChange) produced when the change
			// was exactly zero (the log branch still fires in that case,
			// matching the original NaN-comparison behaviour).
			if (cost != Math.signum(weightChange)) {
				logger.info("Incorrect direction ***************************");
			}

			// save previous weight value
			weightTrainingData.previousValue = weight.value;

			// In online (non-batch) mode apply the weight change immediately;
			// otherwise accumulate it and apply at the end of the epoch.
			if (!this.isInBatchMode()) {
				weight.weightChange = weightChange;
				weight.value += weightChange;
			} else {
				weight.weightChange += weightChange;
			}
		}

	}

	/**
	 * Gives the smoothness cost for the given weight choice.
	 *
	 * For every training pattern the network output is sampled at the
	 * previous, current and probed (current + delta / current - delta) weight
	 * value; together with the desired output these four samples form a
	 * trajectory whose path-length-normalised smoothness is evaluated for both
	 * probe directions. The weight and the network input are restored before
	 * returning.
	 *
	 * @param delta
	 *            step size for the weight-change probe
	 * @param weight
	 *            weight that needs to be changed (temporarily modified here)
	 * @param previousWeightValue
	 *            value the weight had before the last update
	 * @return +1 if increasing the weight is at least as smooth as decreasing
	 *         it, -1 otherwise
	 */
	private double getCost(double delta, Weight weight, double previousWeightValue) {

		int numPattern = this.getTrainingSet().size();
		int totalPointsConsidered = 4; // rows: previous, current, probed, goal
		double currentWeight = weight.value;

		double[] currentInput = ((MultiLayerPerceptron) this.getNeuralNetwork())
				.getInput();

		// Column 0 carries the x-coordinate of each row (0.4 * row number,
		// 1-based); columns 1..numPattern carry the per-pattern outputs.
		double[][] neuralOutputsIncrease = new double[totalPointsConsidered][numPattern + 1];
		double[][] neuralOutputsDecrease = new double[totalPointsConsidered][numPattern + 1];
		for (int m = 1; m <= totalPointsConsidered; m++) {
			neuralOutputsIncrease[m - 1][0] = 4 * .1 * m;
			neuralOutputsDecrease[m - 1][0] = 4 * .1 * m;
		}

		int patternNum = 0;

		/*
		 * In this block of code:
		 * 1) Iterate through all the training examples.
		 * 2) Record the desired output for each example.
		 * 3) Compute the previous, current and probed neural output by
		 *    temporarily changing the weight.
		 *
		 * NOTE(review): if isStopped() becomes true mid-iteration the
		 * remaining columns stay 0 — behaviour preserved from the original.
		 */
		Iterator<TrainingElement> iterator = this.getTrainingSet().iterator();
		while (iterator.hasNext() && !isStopped()) {
			SupervisedTrainingElement supervisedTrainingElement =
					(SupervisedTrainingElement) iterator.next();

			double[] input = supervisedTrainingElement.getInput();
			patternNum++;

			this.neuralNetwork.setInput(input);
			this.neuralNetwork.calculate();

			// output with the weight rolled back to its previous value
			weight.setValue(previousWeightValue);
			this.neuralNetwork.calculate();
			double[] changedOutputPrev = this.neuralNetwork.getOutput();

			// output with the current weight value
			weight.setValue(currentWeight);
			this.neuralNetwork.calculate();
			double[] currentOutput = this.neuralNetwork.getOutput();
			double[] desiredOutput = supervisedTrainingElement
					.getDesiredOutput();

			// output with the weight increased by delta (then restored)
			weight.inc(delta);
			this.neuralNetwork.calculate();
			double[] changedOutputInc = this.neuralNetwork.getOutput();
			weight.dec(delta);
			this.neuralNetwork.calculate();

			// output with the weight decreased by delta (then restored)
			weight.dec(delta);
			this.neuralNetwork.calculate();
			double[] changedOutputDec = this.neuralNetwork.getOutput();
			weight.inc(delta);
			this.neuralNetwork.calculate();

			neuralOutputsIncrease[0][patternNum] = changedOutputPrev[0];
			neuralOutputsIncrease[1][patternNum] = currentOutput[0];
			neuralOutputsIncrease[2][patternNum] = changedOutputInc[0];
			neuralOutputsIncrease[3][patternNum] = desiredOutput[0];

			neuralOutputsDecrease[0][patternNum] = changedOutputPrev[0];
			neuralOutputsDecrease[1][patternNum] = currentOutput[0];
			neuralOutputsDecrease[2][patternNum] = changedOutputDec[0];
			neuralOutputsDecrease[3][patternNum] = desiredOutput[0];
		}

		// Restore the network to the input it was processing before probing.
		this.neuralNetwork.setInput(currentInput);
		this.neuralNetwork.calculate();

		applyOffset(neuralOutputsIncrease); /* Apply offset to the output */
		applyOffset(neuralOutputsDecrease); /* Apply offset to the output */

		/*
		 * Create and solve the smoothness measure for the previous, current,
		 * probed and goal output values of both candidate directions.
		 */
		PathLengthNormalisedSmoothing plnsInc = new PathLengthNormalisedSmoothing(neuralOutputsIncrease);
		PathLengthNormalisedSmoothing plnsDec = new PathLengthNormalisedSmoothing(neuralOutputsDecrease);

		logger.debug("Neural Output Inc :" + UtilFunctions.display2DArray(neuralOutputsIncrease));
		double costInc = plnsInc.calculateSmoothness(); /* Get the smoothing values */

		logger.debug("Neural Output Dec:" + UtilFunctions.display2DArray(neuralOutputsDecrease));
		double costDec = plnsDec.calculateSmoothness(); /* Get the smoothing values */

		if (costInc <= costDec) {
			logger.info("Returned cost Inc:" + costInc);
			return 1;
		} else {
			logger.info("Returned cost Dec:" + costDec);
			return -1;
		}
	}

	/**
	 * Returns the momentum factor
	 *
	 * @return momentum factor
	 */
	public double getMomentum() {
		return momentum;
	}

	/**
	 * Sets the momentum factor
	 *
	 * @param momentum
	 *            momentum factor
	 */
	public void setMomentum(double momentum) {
		this.momentum = momentum;
	}

	/**
	 * Per-weight training data: remembers the weight's previous value so the
	 * momentum term can be computed. Declared static so each Weight does not
	 * hold a hidden reference to the enclosing learning rule (which would be
	 * dragged along whenever a weight's training data is serialized).
	 */
	public static class MomentumWeightTrainingData {
		public double previousValue;
	}

	/**
	 * Attaches a fresh {@link MomentumWeightTrainingData} object to every
	 * weight in the network so previous weight values can be tracked during
	 * training.
	 */
	@Override
	protected void onStart() {
		super.onStart();
		for (Layer layer : this.neuralNetwork.getLayers()) {
			for (Neuron neuron : layer.getNeurons()) {
				for (Connection connection : neuron.getInputConnections()) {
					connection.getWeight().setTrainingData(
							new MomentumWeightTrainingData());
				}
			}
		}
	}

	/**
	 * Shifts every trajectory (column) whose goal value is exactly 1 down by
	 * one unit, so all trajectories end at the same target level before the
	 * smoothness is evaluated.
	 *
	 * NOTE(review): exact floating-point comparison against 1 — presumably the
	 * desired outputs are binary (0/1) targets so the goal row is exactly 1.0;
	 * confirm against the training data before using non-binary targets.
	 *
	 * @param xy
	 *            matrix whose last row holds the goal values
	 */
	private void applyOffset(double[][] xy) {
		for (int n = 0; n < xy[0].length; n++) {
			if (xy[xy.length - 1][n] == 1) {
				for (int f = xy.length - 1; f >= 0; f--) {
					xy[f][n] = xy[f][n] - 1;
				}
			}
		}
	}

}