﻿using System;
using System.IO;
using System.Drawing;
using System.Collections.Generic;

namespace NewVSS
{
    /// <summary>
    /// Three-layer MLP (input layer, one hidden layer, output layer). The output
    /// neurons receive signals both from the hidden layer and directly from the
    /// input layer (see the second weight loop over <c>Layer[L-2]</c> inputs in
    /// <see cref="getError(double[,], double, int, bool, int, double)"/>).
    /// Training uses a simplified Variable Step Search (VSS) algorithm
    /// (<see cref="VSS"/>). Hot paths use unsafe pointer arithmetic over the flat
    /// row-major layout of the multidimensional arrays; the various *Length*
    /// fields cache array dimensions for those manual offset computations.
    /// </summary>
    partial class Network
    {
        // Topology counters and cached array dimensions used as pointer strides
        // in the unsafe sections below.
        public int numInputs, numHidden1, numOutputs, numLayers, numEpochs, errMeasure, weightsLength0, weightsLength1, weightsLength2,
            signaltableL2, signaltableL1, DataSetL1, weightsGetLength1, weightsGetLength2, numVectors, numTestVect, lastColumnContainsOutliers = 0;
        // dw0: initial per-weight VSS step; offset: saturation margin for the
        // classification error (outputs beyond +/-offset contribute no error);
        // oldWeightTest: previous value of the weight being probed, consumed by rec().
        public double dw0 = 0.3, error = 0, accuracy = 0, meanOutlier = 0, offset = 0.997, delta = 0.5, errExp = 2, oldWeightTest, oldError = -1;
        public int[] Layer;                                   // neurons per layer: [numInputs, numHidden1, numOutputs]
        double[, ,] SignalTableY, SignalTableSumWX, weights, deltaa; // [layer, neuron, weight] / per-vector signal tables
        public double[,] TrainingDataSet;                     // rows = vectors; last column = class label or regression target
        public bool classification, RandomWeights = true;
        double[] errorTable;                                  // allocated in CreateNetwork; not read in this file

        /// <summary>
        /// Builds the network topology from the data set and allocates all tables.
        /// </summary>
        /// <param name="DataSet">Training data; each row is one vector, the last
        /// column is the class label (classification, labels numbered from 1) or
        /// the target value (regression).</param>
        /// <param name="numEpochs">Number of VSS sweeps over all weights.</param>
        /// <param name="numHidden1">Neurons in the single hidden layer.</param>
        /// <param name="classification">true = classification (one output per
        /// distinct label); false = regression (a single linear output).</param>
        /// <param name="RandomWeights">true = random initial weights; false =
        /// read initial weights from datasets/weights.txt.</param>
        /// <param name="errMeasure">Error-measure selector (stored; not referenced in this file).</param>
        /// <param name="delta">Threshold parameter (stored; not referenced in this file).</param>
        /// <param name="errExp">Error exponent (stored; not referenced in this file — the
        /// getError overloads use their own errorExponent parameter).</param>
        public Network(double[,] DataSet,  int numEpochs = 20, int numHidden1 = 5, bool classification = true,
                   bool RandomWeights = true, int errMeasure = 0, double delta = 0.5, double errExp = 2)
        {
            this.numEpochs = numEpochs;
            this.RandomWeights = RandomWeights;
            this.numHidden1 = numHidden1;

            numLayers = 3; // fixed architecture: input, hidden, output

            this.classification = classification;
            this.errMeasure = errMeasure;
            this.delta = delta;
            this.errExp = errExp;
            numInputs = DataSet.GetLength(1) - 1; // last column = class/Y

            TrainingDataSet = DataSet;
            numVectors = DataSet.GetLength(0);

            if (classification) // number of outputs = number of distinct class labels
            {
                SortedSet<int> Nout = new SortedSet<int>();
                for (int v = 0; v < TrainingDataSet.GetLength(0); v++)
                    Nout.Add((int)TrainingDataSet[v, numInputs]);
                numOutputs = Nout.Count;
            }
            else //regression
                numOutputs = 1;

            CreateNetwork();
        }

        /// <summary>
        /// Allocates the weight/delta/signal tables sized to the topology and
        /// initializes the weights (randomly, or from datasets/weights.txt when
        /// <see cref="RandomWeights"/> is false).
        /// </summary>
        private void CreateNetwork()
        {
            Layer = new int[numLayers];//Layer table gives us information about how many neurons are in each layer
            Layer[0] = numInputs;
            Layer[1] = numHidden1;
            Layer[2] = numOutputs;

            errorTable = new double[numOutputs]; //Table that we use to store errors of each output neuron

            // Output neurons connect to all hidden neurons AND all inputs, so they
            // carry numHidden1 + numInputs weights (plus bias).
            int WeightCounter = numHidden1 + numInputs;
            int maxNeurons = numInputs;
            for (int i = 1; i < numLayers; i++)
                if (maxNeurons < Layer[i])
                    maxNeurons = Layer[i];

            // Third dimension must fit the largest fan-in plus a bias slot.
            if (maxNeurons + 1 > WeightCounter)
                weights = new double[numLayers, maxNeurons, maxNeurons + 1];
            else
                weights = new double[numLayers, maxNeurons, WeightCounter + 1];
            weightsLength0 = numLayers;
            weightsLength1 = maxNeurons;
            weightsLength2 = weights.GetLength(2); 

            // Cached strides for the unsafe pointer arithmetic in VSS/getError/rec.
            signaltableL2 = maxNeurons;
            signaltableL1 = numLayers;
            DataSetL1 = TrainingDataSet.GetLength(1);
            weightsGetLength1 = weights.GetLength(1);
            weightsGetLength2 = weights.GetLength(2); 

            deltaa = new double[weights.GetLength(0), weights.GetLength(1), weights.GetLength(2)];
            SignalTableY = new double[TrainingDataSet.GetLength(0), numLayers, maxNeurons];
            SignalTableSumWX = new double[TrainingDataSet.GetLength(0), numLayers, maxNeurons];

            string[] WeightsFromFile = { "" };
            if (!RandomWeights)
                WeightsFromFile = File.ReadAllLines(@"../../../datasets/weights.txt");
            int wff = 0;


            Random rnd = new Random();
            // Hidden layer (L == 1): one weight per input plus bias.
            for (int L = 1; L < 2; L++)
            {
                for (int n = 0; n < Layer[L]; n++)
                {

                    for (int w = 0; w < Layer[0] + 1; w++)
                    {
                        if (RandomWeights)
                        {
                            weights[L, n, w] = 0.5 - rnd.NextDouble(); //adding random weights in (-0.5, 0.5]
                            deltaa[L, n, w] = dw0; //initial VSS step for this weight
                        }
                        else
                        {
                            // NOTE(review): file weights get the same 0.5 - x transform as the
                            // random branch — presumably the file stores raw [0,1) samples;
                            // confirm against the weights.txt producer.
                            weights[L, n, w] = 0.5 - Double.Parse(WeightsFromFile[wff++], System.Globalization.CultureInfo.InvariantCulture); //adding weights from file
                            deltaa[L, n, w] = dw0; //initial VSS step for this weight
                        }
                    }
                }

            }
            // Output layer (L == 2): weights to hidden + direct weights to inputs + bias.
            for (int L = 2; L < 3; L++)
            {
                for (int n = 0; n < Layer[L]; n++)
                {

                    for (int w = 0; w < WeightCounter + 1; w++)
                    {
                        if (RandomWeights)
                        {
                            weights[L, n, w] = 0.5 - rnd.NextDouble(); //adding random weights in (-0.5, 0.5]
                            deltaa[L, n, w] = dw0; //initial VSS step for this weight
                        }
                        else
                        {
                            weights[L, n, w] = 0.5 - Double.Parse(WeightsFromFile[wff++], System.Globalization.CultureInfo.InvariantCulture); //adding weights from file
                            deltaa[L, n, w] = dw0; //initial VSS step for this weight
                        }
                    }
                }

            }


        }

        /// <summary>
        /// MLP learning: searches for weights that minimize the training error.
        /// Simplified VSS: for each weight in turn, a step dw (taken from the
        /// per-weight deltaa table) is added; if the error decreases the move is
        /// kept and a second, doubled step is tried. Otherwise the step is tried
        /// in the opposite direction the same way. If neither direction helps,
        /// the weight is restored and its step size is shrunk (factor 0.67).
        /// The sweep over all weights is repeated for numEpochs iterations.
        /// After every probe, rec() incrementally patches the cached signal
        /// tables so getError(L,n,w) can re-score without a full forward pass.
        /// </summary>
        public void VSS()
        {
            oldError = getError(TrainingDataSet);
            double Error = oldError;
            double dw = dw0;
            unsafe
            {
                fixed (double* p1 = &weights[0, 0, 0], p2 = &deltaa[0, 0, 0])
                {
                    for (int e = 0; e < numEpochs; e++)
                    {
                        // Progress: mean error (and training accuracy for classification).
                        if (classification)
                            Console.WriteLine(oldError / numVectors + "    " + accuracy / numVectors);
                        else
                            Console.WriteLine(oldError / numVectors);
                        // NOTE(review): dw is overwritten from the deltaa table at the top
                        // of the innermost loop, so this decay (and the dw0 init above)
                        // has no effect — confirm whether it was meant to scale deltaa.
                        dw *= 0.995;
                        for (int L = 1; L < numLayers; L++)
                        {
                            int iL = L * weightsGetLength1 * weightsGetLength2; // flat offset of layer L in weights
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                int iN = n * weightsGetLength2; // flat offset of neuron n
                                for (int w = 0; w < Layer[L - 1] + 1; w++)
                                {
                                    bool errorDecreased = false;
                                    dw = 0.67 * *(p2 + iL + iN + w); // step from this weight's adaptive delta
                                    double oldW = *(p1 + iL + iN + w);
                                    oldWeightTest = *(p1 + iL + iN + w);
                                    *(p1 + iL + iN + w) += dw;      // probe: w + dw
                                    rec(L, n, w);
                                    if ((Error = getError(TrainingDataSet, L, n, w)) < oldError)
                                    {
                                        oldError = Error;
                                        errorDecreased = true;
                                        *(p2 + iL + iN + w) = dw;
                                        oldWeightTest = *(p1 + iL + iN + w);
                                        *(p1 + iL + iN + w) += dw;  // probe: w + 2*dw
                                        rec(L, n, w);
                                        if ((Error = getError(TrainingDataSet, L, n, w)) < oldError)
                                        {
                                            oldError = Error;
                                            *(p2 + iL + iN + w) = 2 * dw; // grow step for next sweep
                                        }
                                        else
                                        {
                                            oldWeightTest = *(p1 + iL + iN + w);
                                            *(p1 + iL + iN + w) -= dw;   // keep w + dw
                                            rec(L, n, w);
                                        }
                                    }
                                    else
                                    {
                                        oldWeightTest = *(p1 + iL + iN + w);
                                        *(p1 + iL + iN + w) -= 2 * dw;   // probe: w - dw
                                        rec(L, n, w);
                                        if ((Error = getError(TrainingDataSet, L, n, w)) < oldError)
                                        {
                                            oldError = Error;
                                            errorDecreased = true;
                                            oldWeightTest = *(p1 + iL + iN + w);
                                            *(p1 + iL + iN + w) -= dw;   // probe: w - 2*dw
                                            *(p2 + iL + iN + w) = -dw;
                                            rec(L, n, w);
                                            if ((Error = getError(TrainingDataSet, L, n, w)) < oldError)
                                            {
                                                oldError = Error;
                                                *(p2 + iL + iN + w) = -2 * dw;
                                            }
                                            else
                                            {
                                                oldWeightTest = *(p1 + iL + iN + w);
                                                *(p1 + iL + iN + w) += dw; // keep w - dw
                                                rec(L, n, w);
                                            }
                                        }
                                        if (!errorDecreased)
                                        {
                                            // Neither direction helped: restore and shrink the step.
                                            oldWeightTest = *(p1 + iL + iN + w);
                                            *(p1 + iL + iN + w) = oldW;
                                            rec(L, n, w);
                                            *(p2 + iL + iN + w) = 0.67 * dw;
                                        }
                                    }
                                }//for w
                            }//for n
                        }//for L
                    } //for e
                }
            }
        }

        /// <summary>
        /// Full forward pass: propagates every vector of DataSet1 through the
        /// network (filling SignalTableSumWX / SignalTableY) and returns the
        /// summed error. Classification: per-output |y -/+ 1|^errorExponent with a
        /// saturation margin of <see cref="offset"/>, labels numbered from 1; also
        /// updates <see cref="accuracy"/>. Regression: the output neuron is linear
        /// (SumWX copied to Y) and the error is |y - target|^errorExponent.
        /// </summary>
        /// <remarks>The test/outliers/errMeasure/delta parameters are accepted but
        /// not referenced in this implementation.</remarks>
        public double getError(double[,] DataSet1, double errorExponent = 2, int test = 0, bool outliers = false, int errMeasure = 0, double delta = 0.5)
        {
            double Error = 0;
            int numVect = DataSet1.GetLength(0);
            accuracy = 0;
            int iVDataSet, iVSignal, iLSignal, iLWeight, iNWeights;
            int sL12 = signaltableL1 * signaltableL2; // per-vector stride in the signal tables
            int wL12 = weightsLength1 * weightsLength2; // per-layer stride in the weight table
            unsafe
            {
                fixed (double* p1 = &SignalTableSumWX[0, 0, 0], p2 = &SignalTableY[0, 0, 0], p3 = &weights[0, 0, 0], p4 = &DataSet1[0, 0])
                {
                    for (int vect = 0; vect < numVect; vect++)
                    {
                        iVDataSet = DataSetL1 * vect;
                        iVSignal = vect * sL12;
                        // Layer 0 outputs are the raw inputs.
                        for (int n = 0; n < Layer[0]; n++)
                            *(p2 + n + iVSignal) = *(p4 + iVDataSet + n);
                        // Hidden layer: weighted sum over inputs + bias, tanh activation.
                        for (int L = 1; L < 2; L++)
                        {
                            iLWeight = L * wL12;
                            iLSignal = L * signaltableL2;
                            int iL11Signal = (L - 1) * signaltableL2;
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                iNWeights = n * weightsLength2;
                                *(p1 + iVSignal + iLSignal + n) = 0;
                                for (int w = 0; w < Layer[L - 1]; w++)
                                {
                                    *(p1 + iVSignal + iLSignal + n) += *(p3 + iLWeight + iNWeights + w) * *(p2 + iVSignal + iL11Signal + w);
                                }
                                *(p1 + iVSignal + iLSignal + n) += *(p3 + iLWeight + iNWeights + Layer[L - 1]); //bias 
                                *(p2 + iVSignal + iLSignal + n) = Math.Tanh(*(p1 + iVSignal + iLSignal + n));//y
                            }
                        }
                        // Output layer: weighted sum over hidden outputs, then over the
                        // raw inputs (direct input->output connections), + bias, tanh.
                        for (int L = 2; L < 3; L++)
                        {
                            iLWeight = L * wL12;
                            iLSignal = L * signaltableL2;
                            int iL11Signal = (L - 1) * signaltableL2;
                            int iL12Signal = (L - 2) * signaltableL2;
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                iNWeights = n * weightsLength2;
                                *(p1 + iVSignal + iLSignal + n) = 0;
                                for (int w = 0; w < Layer[L - 1]; w++)
                                    *(p1 + iVSignal + iLSignal + n) += *(p3 + iLWeight + iNWeights + w) * *(p2 + iVSignal + iL11Signal + w);
                                for (int w = Layer[L - 1]; w < Layer[L - 2] + Layer[L - 1]; w++)
                                {
                                    int iW2 = (w - Layer[L - 1]);
                                    *(p1 + iVSignal + iLSignal + n) += *(p3 + iLWeight + iNWeights + w) * *(p2 + iVSignal + iL12Signal + iW2);
                                }
                                *(p1 + iVSignal + iLSignal + n) += *(p3 + iLWeight + iNWeights + Layer[L - 1] + Layer[L - 2]); //bias 
                                *(p2 + iVSignal + iLSignal + n) = Math.Tanh(*(p1 + iVSignal + iLSignal + n));//y
                            }
                        }
                    }
                    if (classification)
                    {

                        double maxY, maxN;
                        for (int vect = 0; vect < numVect; vect++) //calculating error for classification
                        {
                            iVDataSet = vect * DataSetL1;
                            iVSignal = vect * sL12;
                            maxY = -1; maxN = -1;
                            int L = numLayers - 1;

                            iLSignal = L * signaltableL2;
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                // Track the winning output neuron for the accuracy count.
                                if (*(p2 + iVSignal + iLSignal + n) > maxY)
                                {
                                    maxY = *(p2 + iVSignal + iLSignal + n);
                                    maxN = n;
                                }
                                int y = (int)*(p4 + iVDataSet + Layer[0]) - 1;
                                //class numbering starts from 1
                                if (n == y)  //this output is expected to be 1
                                {
                                    if (*(p2 + iVSignal + iLSignal + n) < offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iVSignal + iLSignal + n) - 1), errorExponent);
                                }
                                else  //this output is expected to be -1
                                {
                                    if (*(p2 + iVSignal + iLSignal + n) > -offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iVSignal + iLSignal + n) + 1), errorExponent);
                                }
                            }

                            if (maxN == *(p4 + iVDataSet + Layer[0]) - 1)
                                accuracy++;
                        }
                    }
                    else
                    {


                        for (int vect = 0; vect < numVect; vect++) //calculating error for regression
                        {
                            iVDataSet = vect * DataSetL1;
                            iVSignal = vect * sL12;
                            for (int L = numLayers - 1; L < numLayers; L++)
                            {
                                iLSignal = L * signaltableL2;
                                for (int n = 0; n < Layer[L]; n++)
                                {
                                    // Regression output is linear: undo the tanh by copying SumWX.
                                    *(p2 + iVSignal + iLSignal + n) = *(p1 + iVSignal + iLSignal + n);
                                    Error += Math.Pow(Math.Abs(*(p2 + iVSignal + iLSignal + n) - *(p4 + iVDataSet + Layer[0])), errorExponent);
                                }
                            }
                        }
                    }
                }
            }
            return Error;
        }

        /// <summary>
        /// Incremental re-forward after a single weight change: for every vector,
        /// patches SumWX/Y of the one neuron (L1, n1) whose weight w1 changed
        /// (subtracting the contribution of the old value stored in
        /// <see cref="oldWeightTest"/> and adding the new one), then updates the
        /// layers above it. Much cheaper than a full getError forward pass.
        /// </summary>
        public void rec(int L1, int n1, int w1)
        {
            int sL12 = signaltableL1 * signaltableL2;
            int wL12 = weightsLength1 * weightsLength2;
            int iLWeight, iLSignal, iL111Signal, iL112Signal, iNWeights, iLWeights, iL, iL11;

            unsafe
            {
                fixed (double* p1 = &SignalTableSumWX[0, 0, 0], p2 = &SignalTableY[0, 0, 0], p3 = &weights[0, 0, 0])
                {
                    int iN1weights = n1 * weightsLength2;
                    int iL1weights = L1 * wL12;
                    int iL1Signal = L1 * signaltableL2;
                    int iL11Signal = (L1 - 1) * signaltableL2;
                    for (int vect = 0; vect < TrainingDataSet.GetLength(0); vect++)
                    {
                        int iV = vect * sL12;
                        if (w1 == Layer[L1 - 1]) // bias weight: contribution is the weight itself
                        {
                            *(p1 + iV + iL1Signal + n1) += *(p3 + w1 + iN1weights + iL1weights);
                            *(p1 + iV + iL1Signal + n1) -= oldWeightTest;
                        }
                        else // ordinary weight: contribution is weight * previous-layer output
                        {
                            *(p1 + iV + iL1Signal + n1) += *(p3 + w1 + iN1weights + iL1weights) * *(p2 + iV + iL11Signal + w1);
                            *(p1 + iV + iL1Signal + n1) -= oldWeightTest * *(p2 + iV + iL11Signal + w1);
                        }
                        double oldY = *(p2 + iV + iL1Signal + n1);
                        *(p2 + iV + iL1Signal + n1) = Math.Tanh(*(p1 + iV + iL1Signal + n1));
                        if (L1 != numLayers - 1)
                        {
                            // Next layer up: patch only the term fed by neuron n1.
                            for (int L = L1 + 1; L < L1 + 2; L++)
                            {
                                iLWeights = L * wL12;
                                iL = L * signaltableL2;
                                iL11 = (L - 1) * signaltableL2;

                                for (int n = 0; n < Layer[L]; n++)
                                {
                                    iNWeights = n * weightsLength2;
                                    *(p1 + iV + iL + n) += *(p3 + iLWeights + iNWeights + n1) * *(p2 + n1 + iV + iL11);
                                    *(p1 + iV + iL + n) -= *(p3 + iLWeights + iNWeights + n1) * oldY;
                                    *(p2 + iV + iL + n) = Math.Tanh(*(p1 + iV + iL + n)); //y
                                }
                            }
                            // Layers two or more above: recompute fully (same formula as getError).
                            for (int L = L1 + 2; L < numLayers; L++)
                            {
                                iLWeight = L * wL12;
                                iLSignal = L * signaltableL2;
                                iL111Signal = (L - 1) * signaltableL2;
                                iL112Signal = (L - 2) * signaltableL2;
                                for (int n = 0; n < Layer[L]; n++)
                                {
                                    iNWeights = n * weightsLength2;
                                    *(p1 + iV + iLSignal + n) = 0;
                                    for (int w = 0; w < Layer[L - 1]; w++)
                                        *(p1 + iV + iLSignal + n) += *(p3 + iLWeight + iNWeights + w) * *(p2 + iV + iL111Signal + w);
                                    for (int w = Layer[L - 1]; w < Layer[L - 2] + Layer[L - 1]; w++)
                                    {
                                        int iW2 = (w - Layer[L - 1]);
                                        *(p1 + iV + iLSignal + n) += *(p3 + iLWeight + iNWeights + w) * *(p2 + iV + iL112Signal + iW2);
                                    }
                                    *(p1 + iV + iLSignal + n) += *(p3 + iLWeight + iNWeights + Layer[L - 1] + Layer[L - 2]); //bias 
                                    *(p2 + iV + iLSignal + n) = Math.Tanh(*(p1 + iV + iLSignal + n));//
                                }
                            }
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Cheap error evaluation used inside VSS: assumes the signal tables were
        /// already brought up to date by <see cref="rec"/>, so it only re-reads the
        /// output layer and recomputes the summed error (same error definition as
        /// the full-forward getError overload). The L1/n1/w1 parameters select this
        /// overload at the call site; they are not read here.
        /// </summary>
        public double getError(double[,] DataSet1, int L1, int n1, int w1, double errorExponent = 2, int test = 0, bool outliers = false, int errMeasure = 0, double delta = 0.5)
        {
            double Error = 0;
            int numVect = DataSet1.GetLength(0);
            numTestVect = numVect;
            int iVDataSet, iVSignal, iLSignal, iSgn, y;
            int sL12 = signaltableL1 * signaltableL2;
            int wL12 = weightsLength1 * weightsLength2;
            int L = numLayers - 1; // only the output layer is inspected
            iLSignal = L * signaltableL2;
            double maxY, maxN;
            accuracy = 0;

            unsafe
            {
                fixed (double* p1 = &SignalTableSumWX[0, 0, 0], p2 = &SignalTableY[0, 0, 0], p3 = &DataSet1[0, 0])
                {
                    if (classification)
                    {

                        for (int vect = 0; vect < numVect; vect++)
                        {
                            iVDataSet = vect * DataSetL1;
                            iVSignal = vect * sL12;
                            maxY = -1; maxN = -1;
                            iSgn = iVSignal + iLSignal;

                            for (int n = 0; n < Layer[L]; n++)
                            {
                                // Track the winning output neuron for the accuracy count.
                                if (*(p2 + iSgn + n) > maxY)
                                {
                                    maxY = *(p2 + iSgn + n);
                                    maxN = n;
                                }
                                y = (int)*(p3 + iVDataSet + Layer[0]) - 1;
                                //class numbering starts from 1
                                if (n == y)  //this output is expected to be 1
                                {
                                    if (*(p2 + iSgn + n) < offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) - 1), errorExponent); //calculating error for classification
                                }
                                else  //this output is expected to be -1
                                {
                                    if (*(p2 + iSgn + n) > -offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) + 1), errorExponent); //calculating error for classification
                                }
                            }


                            if (maxN == *(p3 + iVDataSet + Layer[0]) - 1)
                                accuracy++;

                        }

                    }
                    else
                    {

                        for (int vect = 0; vect < numVect; vect++)
                        {
                            iVSignal = vect * sL12;
                            iVDataSet = vect * DataSetL1;
                            iSgn = iVSignal + iLSignal;

                            for (int n = 0; n < Layer[L]; n++)
                            {
                                // Regression output is linear: copy SumWX over the tanh value.
                                *(p2 + iSgn + n) = *(p1 + iSgn + n);
                                Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) - *(p3 + iVDataSet + Layer[0])), errorExponent);//calculating error for regression
                            }

                        }
                    }

                }
            }
            return Error;
        }
    }
}


