﻿using System;
using System.Collections.Generic;
using System.Text;

namespace NeuralNetwork
{
    /// <summary>
    /// Fully-connected feed-forward neural network with a single hidden layer,
    /// trained by back-propagation with momentum. Hidden neurons use the tanh
    /// activation; output neurons use the log-sigmoid activation.
    /// </summary>
    public class BackPropNeuralNet
    {
        private int numInput;
        private int numHidden;
        private int numOutput;

        private float[] inputs;
        private float[][] ihWeights; // input-to-hidden
        private float[] hBiases;
        private float[] hSums;
        private float[] hOutputs;

        private float[][] hoWeights;  // hidden-to-output
        private float[] oBiases;
        private float[] oSums;
        private float[] outputs;

        private float[] oGrads; // output gradients for back-propagation
        private float[] hGrads; // hidden gradients for back-propagation

        private float[][] ihPrevWeightsDelta;  // for momentum with back-propagation
        private float[] hPrevBiasesDelta;
        private float[][] hoPrevWeightsDelta;
        private float[] oPrevBiasesDelta;

        // Single shared RNG for InitWeights. The previous code created a new
        // time-seeded Random on every call, so several nets constructed in quick
        // succession could all receive identical "random" initial weights.
        private static readonly Random rng = new Random();

        // Total number of weights + biases for this topology, in the flat layout
        // used by SetWeights/GetWeights: input-to-hidden weights, hidden biases,
        // hidden-to-output weights, output biases.
        private int NumWeights
        {
            get { return (numInput * numHidden) + (numHidden * numOutput) + numHidden + numOutput; }
        }

        /// <summary>
        /// Builds a numInput -> numHidden -> numOutput network and initializes all
        /// weights and biases to small random values.
        /// </summary>
        /// <param name="numInput">Number of input neurons; must be positive.</param>
        /// <param name="numHidden">Number of hidden neurons; must be positive.</param>
        /// <param name="numOutput">Number of output neurons; must be positive.</param>
        /// <exception cref="ArgumentException">Any layer size is not positive.</exception>
        public BackPropNeuralNet(int numInput, int numHidden, int numOutput)
        {
            if (numInput <= 0 || numHidden <= 0 || numOutput <= 0)
                throw new ArgumentException("All layer sizes must be positive: " +
                  numInput + "-" + numHidden + "-" + numOutput);

            this.numInput = numInput;
            this.numHidden = numHidden;
            this.numOutput = numOutput;

            inputs = new float[numInput];
            ihWeights = MakeMatrix(numInput, numHidden);
            hBiases = new float[numHidden];
            hSums = new float[numHidden];

            hOutputs = new float[numHidden];
            hoWeights = MakeMatrix(numHidden, numOutput);
            oBiases = new float[numOutput];
            oSums = new float[numOutput];
            outputs = new float[numOutput];

            oGrads = new float[numOutput];
            hGrads = new float[numHidden];

            // Momentum deltas must start at zero: UpdateWeights reads them on the
            // first pass, before they have ever been written.
            ihPrevWeightsDelta = MakeMatrix(numInput, numHidden);
            hPrevBiasesDelta = new float[numHidden];
            hoPrevWeightsDelta = MakeMatrix(numHidden, numOutput);
            oPrevBiasesDelta = new float[numOutput];

            InitWeights();
        }

        // Allocates a zero-initialized rows x cols jagged matrix.
        private static float[][] MakeMatrix(int rows, int cols)
        {
            float[][] result = new float[rows][];
            for (int i = 0; i < rows; ++i)
                result[i] = new float[cols];
            return result;
        }

        /// <summary>
        /// Re-initializes every weight and bias to a small random value in the open
        /// interval (0, 0.03) -- the same range the original rejection-sampling loop
        /// produced, generated directly instead of by repeated trial and error.
        /// </summary>
        public void InitWeights()
        {
            float[] weights = new float[NumWeights];
            for (int i = 0; i < weights.Length; ++i)
            {
                float w;
                do
                {
                    w = (float)(rng.NextDouble() * 0.03); // uniform in [0, 0.03)
                } while (w <= 0.0f); // exclude exactly zero, as the original loop did
                weights[i] = w;
            }

            SetWeights(weights);
        }

        /// <summary>
        /// Loads all weights and biases from a flat array laid out as:
        /// input-to-hidden weights (input-major), hidden biases,
        /// hidden-to-output weights (hidden-major), output biases.
        /// </summary>
        /// <param name="weights">Flat array of exactly NumWeights values.</param>
        /// <exception cref="ArgumentException">Array is null or its length does not match the topology.</exception>
        public void SetWeights(float[] weights)
        {
            if (weights == null)
                throw new ArgumentException("weights array is null in SetWeights");
            if (weights.Length != NumWeights)
                throw new ArgumentException("The weights array length: " + weights.Length +
                  " does not match the total number of weights and biases: " + NumWeights);

            int k = 0; // points into weights param

            for (int i = 0; i < numInput; ++i)
                for (int j = 0; j < numHidden; ++j)
                    ihWeights[i][j] = weights[k++];

            for (int i = 0; i < numHidden; ++i)
                hBiases[i] = weights[k++];

            for (int i = 0; i < numHidden; ++i)
                for (int j = 0; j < numOutput; ++j)
                    hoWeights[i][j] = weights[k++];

            for (int i = 0; i < numOutput; ++i)
                oBiases[i] = weights[k++];
        }

        /// <summary>
        /// Returns a copy of all weights and biases in the same flat layout that
        /// SetWeights consumes, so GetWeights/SetWeights round-trip exactly.
        /// </summary>
        public float[] GetWeights()
        {
            float[] result = new float[NumWeights];
            int k = 0;
            for (int i = 0; i < numInput; ++i)
                for (int j = 0; j < numHidden; ++j)
                    result[k++] = ihWeights[i][j];
            for (int i = 0; i < numHidden; ++i)
                result[k++] = hBiases[i];
            for (int i = 0; i < numHidden; ++i)
                for (int j = 0; j < numOutput; ++j)
                    result[k++] = hoWeights[i][j];
            for (int i = 0; i < numOutput; ++i)
                result[k++] = oBiases[i];
            return result;
        }

        /// <summary>
        /// Returns a copy of the output vector produced by the most recent
        /// ComputeOutputs call (all zeros if ComputeOutputs was never called).
        /// </summary>
        public float[] GetOutputs()
        {
            float[] result = new float[numOutput];
            this.outputs.CopyTo(result, 0);
            return result;
        }

        /// <summary>
        /// Feed-forward pass: tanh activation on the hidden layer, log-sigmoid on
        /// the output layer. Caches the inputs, hidden outputs, and outputs so a
        /// subsequent UpdateWeights call can back-propagate.
        /// </summary>
        /// <param name="xValues">Input vector; length must equal numInput.</param>
        /// <returns>A copy of the output vector.</returns>
        /// <exception cref="ArgumentException">Input is null or its length does not match numInput.</exception>
        public float[] ComputeOutputs(float[] xValues)
        {
            // Bug fix: the original message reported this.inputs.Length (which always
            // equals numInput) instead of the offending xValues.Length.
            if (xValues == null)
                throw new ArgumentException("Inputs array is null in ComputeOutputs");
            if (xValues.Length != numInput)
                throw new ArgumentException("Inputs array length " + xValues.Length +
                  " does not match NN numInput value " + numInput);

            // Reset accumulators left over from any previous call.
            Array.Clear(hSums, 0, hSums.Length);
            Array.Clear(oSums, 0, oSums.Length);

            Array.Copy(xValues, this.inputs, numInput); // cache x-values for UpdateWeights

            for (int j = 0; j < numHidden; ++j)  // compute hidden layer weighted sums
                for (int i = 0; i < numInput; ++i)
                    hSums[j] += this.inputs[i] * ihWeights[i][j];

            for (int i = 0; i < numHidden; ++i)  // add biases to hidden sums
                hSums[i] += hBiases[i];

            for (int i = 0; i < numHidden; ++i)  // apply tanh activation
                hOutputs[i] = HyperTanFunction(hSums[i]);

            for (int j = 0; j < numOutput; ++j)  // compute output layer weighted sums
                for (int i = 0; i < numHidden; ++i)
                    oSums[j] += hOutputs[i] * hoWeights[i][j];

            for (int i = 0; i < numOutput; ++i)  // add biases to output sums
                oSums[i] += oBiases[i];

            for (int i = 0; i < numOutput; ++i)  // apply log-sigmoid activation
                this.outputs[i] = SigmoidFunction(oSums[i]);

            float[] result = new float[numOutput]; // copy so callers cannot mutate internal state
            this.outputs.CopyTo(result, 0);
            return result;
        } // ComputeOutputs

        // Log-sigmoid squashed to (0,1); clamped to avoid Math.Exp overflow for
        // extreme sums (at |x| = 45 the result already saturates a float).
        private static float SigmoidFunction(float x)
        {
            if (x < -45.0f) return 0.0f;
            if (x > 45.0f) return 1.0f;
            return 1.0f / (1.0f + (float)Math.Exp(-x));
        }

        // Hyperbolic tangent squashed to (-1,1); clamped for the same reason.
        private static float HyperTanFunction(float x)
        {
            if (x < -45.0f) return -1.0f;
            if (x > 45.0f) return 1.0f;
            return (float)Math.Tanh(x);
        }

        /// <summary>
        /// One back-propagation step with momentum. Assumes ComputeOutputs was
        /// called first so that the cached inputs, hidden outputs, and outputs
        /// correspond to the current weights.
        /// </summary>
        /// <param name="tValues">Target values; length must equal numOutput.</param>
        /// <param name="learn">Learning rate (eta).</param>
        /// <param name="mom">Momentum factor applied to the previous step's deltas.</param>
        /// <exception cref="ArgumentException">Targets are null or their length does not match numOutput.</exception>
        public void UpdateWeights(float[] tValues, float learn, float mom)
        {
            if (tValues == null || tValues.Length != numOutput)
                throw new ArgumentException("target values not same Length as output in UpdateWeights");

            // 1. compute output gradients. Assumes log-sigmoid: derivative is y(1-y).
            for (int i = 0; i < numOutput; ++i)
            {
                float derivative = (1 - outputs[i]) * outputs[i];
                oGrads[i] = derivative * (tValues[i] - outputs[i]); // oGrad = (1-O)(O) * (T-O)
            }

            // 2. compute hidden gradients. Assumes tanh: derivative is (1-y)(1+y).
            // Gradients must be computed right-to-left, so this uses step 1's oGrads.
            for (int i = 0; i < numHidden; ++i)
            {
                float derivative = (1 - hOutputs[i]) * (1 + hOutputs[i]);
                float sum = 0.0f;
                for (int j = 0; j < numOutput; ++j) // sum of downstream gradient * outgoing weight
                    sum += oGrads[j] * hoWeights[i][j];
                hGrads[i] = derivative * sum;
            }

            // 3. update input-to-hidden weights (weights can be updated in any order)
            for (int i = 0; i < numInput; ++i)
            {
                for (int j = 0; j < numHidden; ++j)
                {
                    float delta = learn * hGrads[j] * inputs[i]; // eta * hGrad * input
                    ihWeights[i][j] += delta;
                    ihWeights[i][j] += mom * ihPrevWeightsDelta[i][j]; // momentum; delta is 0.0 on the first pass
                    ihPrevWeightsDelta[i][j] = delta; // save for the next pass
                }
            }

            // 4. update hidden biases (the bias "input" is the constant 1.0)
            for (int i = 0; i < numHidden; ++i)
            {
                float delta = learn * hGrads[i];
                hBiases[i] += delta;
                hBiases[i] += mom * hPrevBiasesDelta[i];
                hPrevBiasesDelta[i] = delta;
            }

            // 5. update hidden-to-output weights (hOutputs feed the output layer)
            for (int i = 0; i < numHidden; ++i)
            {
                for (int j = 0; j < numOutput; ++j)
                {
                    float delta = learn * oGrads[j] * hOutputs[i];
                    hoWeights[i][j] += delta;
                    hoWeights[i][j] += mom * hoPrevWeightsDelta[i][j];
                    hoPrevWeightsDelta[i][j] = delta;
                }
            }

            // 6. update output biases
            for (int i = 0; i < numOutput; ++i)
            {
                float delta = learn * oGrads[i];
                oBiases[i] += delta;
                oBiases[i] += mom * oPrevBiasesDelta[i];
                oPrevBiasesDelta[i] = delta;
            }
        } // UpdateWeights

    } // BackPropNeuralNet
}
