﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace ANN.Learning {
    /// <summary>
    /// Trains an <see cref="INeuralNet"/> with the back-propagation algorithm:
    /// signed error terms are propagated backwards from the output layer and
    /// every weight is nudged along the gradient of the activation function.
    /// </summary>
    public class BackPropagation : ILearning {
        private readonly INeuralNet Network;
        private float _Limit = 0.0f;
        private float _LearningRate = 1.0f;
        private bool converged = false;

        // Errors[l][n] is the SIGNED error term of neuron n on layer l.
        // Indices 0 .. LayerCount-1 are the hidden layers; index LayerCount
        // is the output layer (mirrors the Network.Layers indexing below).
        private readonly float[][] Errors;

        /// <summary>
        /// True once the total error of the last <see cref="LearnEpoch"/> call
        /// dropped below <see cref="Limit"/>.
        /// </summary>
        public bool IsConverged {
            get {
                return converged;
            }
        }

        /// <summary>
        /// Total epoch error below which training is considered converged.
        /// Defaults to 0, i.e. convergence is never reported until a limit is set.
        /// </summary>
        public float Limit {
            get {
                return _Limit;
            }
            set {
                _Limit = value;
            }
        }

        /// <summary>
        /// Step size applied to every weight update. Defaults to 1.0f (a full
        /// gradient step, matching the behaviour before this property existed);
        /// smaller values such as 0.1f usually give more stable training.
        /// </summary>
        public float LearningRate {
            get {
                return _LearningRate;
            }
            set {
                _LearningRate = value;
            }
        }

        /// <summary>
        /// Creates a trainer for <paramref name="network"/> and allocates one
        /// error slot per neuron (all hidden layers plus the output layer).
        /// </summary>
        /// <param name="network">The network to train. Must not be null.</param>
        /// <exception cref="ArgumentNullException">network is null.</exception>
        public BackPropagation(INeuralNet network) {
            if (network == null) {
                throw new ArgumentNullException("network");
            }

            Network = network;

            // One error array per hidden layer, plus one for the output layer.
            Errors = new float[network.LayerCount + 1][];

            for (int i = 0; i < network.LayerCount; i++) { // for all hidden layers
                Errors[i] = new float[network.NeuronsPerLayer];
            }

            Errors[network.LayerCount] = new float[network.OutputCount]; // for output layer
        }

        /// <summary>
        /// Runs one training pass over every sample pair and returns the
        /// accumulated absolute error. Updates <see cref="IsConverged"/>.
        /// </summary>
        /// <param name="input">One input vector per sample.</param>
        /// <param name="output">The matching target vector per sample.</param>
        public float LearnEpoch(float[][] input, float[][] output) {
            float error = 0.0f;

            for (int i = 0; i < input.Length; i++) {
                error += Learn(input[i], output[i]);
            }

            // Check if we are within acceptable error range
            converged = (error < Limit);

            return error;
        }

        /// <summary>
        /// Trains on a single sample: forward pass, error back-propagation,
        /// then one weight update. Returns the sample's total absolute error.
        /// </summary>
        public float Learn(float[] input, float[] output) {
            // Get the output from the network
            Network.Calculate(input);

            // Calculate network error
            float error = CalculateError(output);

            // Update Weights
            UpdateWeights();

            return error;
        }

        /// <summary>
        /// Back-propagates the error terms through the network (stored in
        /// <c>Errors</c> for <see cref="UpdateWeights"/>) and returns the
        /// summed absolute error for reporting and convergence checks.
        /// </summary>
        /// <param name="optimalOutput">Target values for the output layer.</param>
        public float CalculateError(float[] optimalOutput) {
            float error = 0;

            // Output layer first. The propagated error term must keep its
            // sign so the weight update moves the output TOWARDS the target;
            // the old Math.Abs here made every update push the same way and
            // prevented convergence. Only the reported total uses magnitude.
            ILayer layer = Network.Layers[Network.LayerCount];

            for (int i = 0; i < Network.OutputCount; i++) {
                // Get the neurons output
                float output = layer.Neurons[i].Output;

                // Signed delta: positive when the neuron fired too low.
                float e = optimalOutput[i] - output;

                Errors[Network.LayerCount][i] = e;
                error += System.Math.Abs(e);
            }

            // Hidden layers, walking backwards from the layer before the output.
            for (int i = Network.LayerCount - 1; i >= 0; i--) {
                layer = Network.Layers[i];
                ILayer nextLayer = Network.Layers[i + 1];

                for (int n = 0; n < layer.Neurons.Length; n++) {
                    // A hidden neuron's error is the weighted sum of the
                    // errors of the neurons it feeds into.
                    float e = 0;

                    for (int nn = 0; nn < nextLayer.Neurons.Length; nn++) {
                        e += Errors[i + 1][nn] * nextLayer.Neurons[nn].Weights[n];
                    }

                    Errors[i][n] = e;
                    // Magnitude for the total: summing signed values let
                    // opposite-sign errors cancel and under-report the error.
                    error += System.Math.Abs(e);
                }
            }

            return error;
        }

        /// <summary>
        /// Applies one gradient step to every weight using the error terms
        /// computed by <see cref="CalculateError"/>:
        /// w += LearningRate * error * f'(sum) * input.
        /// </summary>
        public void UpdateWeights() {
            // The entire network contains the same activation function
            ANN.Math.IActivationFunction function = Network.Layers[0].Neurons[0].Function;

            // Update weights for all layers (hidden layers + output layer)
            for (int l = 0; l < Network.LayerCount + 1; l++) {
                ILayer layer = Network.Layers[l];

                for (int n = 0; n < layer.Neurons.Length; n++) {
                    // The derivative depends only on the neuron's summed
                    // input, so compute the per-neuron gradient once instead
                    // of once per weight.
                    float gradient = Errors[l][n] * function.DifferentialFunction(layer.Neurons[n].Sum);

                    for (int w = 0; w < layer.Neurons[n].Weights.Length; w++) {
                        layer.Neurons[n].Weights[w] += _LearningRate * gradient * layer.Neurons[n].Inputs[w];
                    }
                }
            }
        }
    }
}
