package Algorithms;

import NeuronNetworkLibrary.Network;

/**
 * Implementation of the backpropagation training step for a feed-forward
 * neural network. One call performs a single weight update: it first
 * propagates the error signals (deltas) backwards through the hidden
 * layers, then adjusts every input weight of every neuron using the
 * learning rate and, from the second update onwards, a momentum term.
 *
 * @author Zbyszko
 */
public class BackPropagation {

    /** Step size applied to the delta * input term of every weight update. */
    private static final double LEARNING_RATE = 0.2;

    /** Momentum coefficient applied to the previous weight change; taken from interval (0,1). */
    private static final double MOMENTUM_RATE = 0.5;

    /**
     * Performs one backpropagation pass over the given network.
     * <p>
     * Phase 1 computes the return signals (deltas) of all hidden-layer
     * neurons, walking backwards from layer n-1 down to the first hidden
     * layer. Phase 2 then updates every input weight of the output and
     * hidden layers from the stored outputs and deltas. The pre-update
     * weights of each neuron are saved via {@code setPreviousWeights} so
     * the next call can apply the momentum term.
     *
     * @param n the neural network to train; assumed to already hold the
     *          forward-pass outputs and the output-layer deltas — TODO
     *          confirm against the caller.
     */
    public static void calculateBackPropagation(Network n) {

        /*
         * Phase 1: propagate the return signals backwards through the
         * hidden layers, starting from the last hidden layer (which is fed
         * by the output layer) down to the first one (each fed by the
         * hidden layer above it).
         */
        final int lastHidden = n.getNumberOfHiddenLayers() - 1;

        for (int m = lastHidden; m >= 0; m--) {

            if (m == lastHidden) {
                // The layer "after" the last hidden layer is the output layer.
                for (int i = 0; i < n.getHiddenLayers().get(m).size(); i++) {

                    int nextSize = n.getOutputLayer().size();
                    double[] deltas = new double[nextSize];
                    double[] weights = new double[nextSize];

                    // Collect, for neuron i, each downstream neuron's delta and
                    // the weight that downstream neuron applies to neuron i's output.
                    for (int j = 0; j < nextSize; j++) {
                        deltas[j] = n.getOutputLayer().get(j).getDelta();
                        weights[j] = n.getOutputLayer().get(j).getInputWeights()[i];
                    }
                    n.getHiddenLayers().get(m).get(i).setDelta(weights, deltas);
                }
            } else {
                // Interior hidden layer: fed back by hidden layer m + 1.
                for (int i = 0; i < n.getHiddenLayers().get(m).size(); i++) {

                    int nextSize = n.getHiddenLayers().get(m + 1).size();
                    double[] deltas = new double[nextSize];
                    double[] weights = new double[nextSize];

                    for (int j = 0; j < nextSize; j++) {
                        deltas[j] = n.getHiddenLayers().get(m + 1).get(j).getDelta();
                        weights[j] = n.getHiddenLayers().get(m + 1).get(j).getInputWeights()[i];
                    }
                    n.getHiddenLayers().get(m).get(i).setDelta(weights, deltas);
                }
            }
        }

        /*
         * Phase 2: using the determined and stored neuron outputs and
         * return signals, update every weight of the network.
         *
         * First update of a neuron (no previous weights yet):
         *   w[p] += LEARNING_RATE * delta * input[p]
         * Subsequent updates add a momentum term:
         *   w[p] += LEARNING_RATE * delta * input[p]
         *           + MOMENTUM_RATE * (w[p] - previousW[p])
         * In both cases the PRE-update weights are saved as the neuron's
         * previous weights for the next pass.
         */

        // --- Output layer ---
        for (int m = 0; m < n.getOutputLayer().size(); m++) {

            // Live reference to the neuron's weight array; mutated in place
            // and handed back via setInputWeights below.
            double[] newWeights = n.getOutputLayer().get(m).getInputWeights();

            // Hoisted loop invariants: delta, inputs and previous weights do
            // not change while we iterate over this neuron's weights.
            double delta = n.getOutputLayer().get(m).getDelta();
            double[] inputs = n.getOutputLayer().get(m).getInputValues();
            double[] previous = n.getOutputLayer().get(m).getPreviousWeights();

            if (previous == null) {
                // First update: remember the pre-update weights, no momentum.
                n.getOutputLayer().get(m).setPreviousWeights(newWeights.clone());

                for (int p = 0; p < newWeights.length; p++) {
                    newWeights[p] += LEARNING_RATE * delta * inputs[p];
                }
            } else {
                // Snapshot the pre-update weights; they become the "previous
                // weights" only after the momentum term has been applied.
                double[] preUpdate = newWeights.clone();

                for (int p = 0; p < newWeights.length; p++) {
                    newWeights[p] += LEARNING_RATE * delta * inputs[p]
                            + MOMENTUM_RATE * (newWeights[p] - previous[p]);
                }
                n.getOutputLayer().get(m).setPreviousWeights(preUpdate);
            }

            n.getOutputLayer().get(m).setInputWeights(newWeights);
        }

        // --- Hidden layers ---
        for (int i = 0; i < n.getHiddenLayers().size(); i++) {
            for (int j = 0; j < n.getHiddenLayers().get(i).size(); j++) {

                double[] newWeights = n.getHiddenLayers().get(i).get(j).getInputWeights();

                double delta = n.getHiddenLayers().get(i).get(j).getDelta();
                double[] inputs = n.getHiddenLayers().get(i).get(j).getInputValues();
                double[] previous = n.getHiddenLayers().get(i).get(j).getPreviousWeights();

                if (previous == null) {
                    // First update: remember the pre-update weights, no momentum.
                    n.getHiddenLayers().get(i).get(j).setPreviousWeights(newWeights.clone());

                    for (int p = 0; p < newWeights.length; p++) {
                        newWeights[p] += LEARNING_RATE * delta * inputs[p];
                    }
                } else {
                    double[] preUpdate = newWeights.clone();

                    for (int p = 0; p < newWeights.length; p++) {
                        newWeights[p] += LEARNING_RATE * delta * inputs[p]
                                + MOMENTUM_RATE * (newWeights[p] - previous[p]);
                    }
                    n.getHiddenLayers().get(i).get(j).setPreviousWeights(preUpdate);
                }

                n.getHiddenLayers().get(i).get(j).setInputWeights(newWeights);
            }
        }
    }
}
