﻿using System;
using System.IO;
using System.Drawing;
using System.Collections.Generic;

namespace MLP
{
    partial class Network
    {
        // Cached array dimensions, computed once in CreateNetworkST, so the unsafe
        // pointer code can turn [i, j, k] indices into flat offsets without
        // repeated GetLength calls inside hot loops.
        int weightsLength0, weightsLength1, weightsLength2, signaltableL2, signaltableL1, DataSetL1,
            weightsGetLength1, weightsGetLength2;
        // SignalTableY / SignalTableSumWX: [vector, layer, neuron] tables holding each
        // neuron's output and weighted input sum for every training vector;
        // deltaa: [layer, neuron, weight] adaptive step sizes used by VSS_ST_PTR.
        public double[, ,] SignalTableY, SignalTableSumWX, deltaa;
        public double[, ,] weights;        // [layer, neuron, weight]; index Layer[L-1] holds the bias weight
        double oldWeightTest;              // weight value before the latest change; rec_PTR subtracts its contribution
        public int maxNeurons;             // size of the widest layer; all tables are padded to this width


        /// <summary>
        /// Allocates the neuron, weight and signal tables and initializes every weight
        /// to a value in (-0.5, 0.5), either from "weights.txt" (so runs are
        /// reproducible) or directly from a random generator.
        /// </summary>
        private void CreateNetworkST()
        {
            N = new Neuron[numLayers][];
            for (int L = 0; L < numLayers; L++)
                N[L] = new Neuron[Layer[L]];

            // The widest layer determines the (rectangular, padded) table dimensions.
            maxNeurons = numInputs;
            for (int i = 1; i < numLayers; i++)
                if (maxNeurons < Layer[i])
                    maxNeurons = Layer[i];

            weights = new double[numLayers, maxNeurons, maxNeurons + 1]; // +1 slot per neuron for the bias
            deltaa = new double[weights.GetLength(0), weights.GetLength(1), weights.GetLength(2)];
            SignalTableY = new double[numVectors, numLayers, maxNeurons];
            SignalTableSumWX = new double[numVectors, numLayers, maxNeurons];

            // Cache the dimensions used by the flat-offset pointer arithmetic elsewhere.
            weightsLength0 = numLayers;
            weightsLength1 = maxNeurons;
            weightsLength2 = maxNeurons + 1;
            signaltableL2 = maxNeurons;
            signaltableL1 = numLayers;
            DataSetL1 = TrainingDataSet.GetLength(1);
            weightsGetLength1 = weights.GetLength(1);
            weightsGetLength2 = weights.GetLength(2);

            if (!RandomWeights)
            {
                // Reproducible initialization: cycle through numbers stored in weights.txt.
                string[] WeightsFromFile = File.Exists("weights.txt") ? File.ReadAllLines("weights.txt") : null;

                // FIX: regenerate the file when it is missing OR too short. The original
                // code assumed an existing file always had 1000 lines and indexed past
                // the end of shorter files (IndexOutOfRangeException).
                if (WeightsFromFile == null || WeightsFromFile.Length < 2)
                {
                    Random rnd = new Random();
                    WeightsFromFile = new string[1000];
                    for (int i = 0; i < WeightsFromFile.Length; i++)
                        WeightsFromFile[i] = rnd.NextDouble().ToString(System.Globalization.CultureInfo.InvariantCulture);

                    File.WriteAllLines("weights.txt", WeightsFromFile);
                }

                int numW = WeightsFromFile.Length; // FIX: was hard-coded to 1000 regardless of the actual file length
                int wff = 1; // index 0 is deliberately never used (preserves original behavior)

                for (int L = 1; L < numLayers; L++)
                    for (int n = 0; n < Layer[L]; n++)
                        for (int w = 0; w < Layer[L - 1] + 1; w++)
                        {
                            // FIX: parse with the invariant culture so the file round-trips
                            // regardless of the machine's decimal separator.
                            // NOTE(review): files written by older versions in a comma-decimal
                            // culture will no longer parse — confirm this is acceptable.
                            weights[L, n, w] = 0.5 - double.Parse(WeightsFromFile[wff++],
                                System.Globalization.CultureInfo.InvariantCulture); // weight from file
                            deltaa[L, n, w] = dw0; // initial per-weight step size
                            if (wff >= numW)
                                wff = 1; // wrap around and reuse the stored numbers
                        }
            }
            else
            {
                Random rnd = new Random();
                for (int L = 1; L < numLayers; L++)
                    for (int n = 0; n < Layer[L]; n++)
                        for (int w = 0; w < Layer[L - 1] + 1; w++)
                        {
                            weights[L, n, w] = 0.5 - rnd.NextDouble(); // random weight in (-0.5, 0.5]
                            deltaa[L, n, w] = dw0; // initial per-weight step size
                        }
            }
        }

        public void VSS_ST_PTR() // MLP network learning algorithm: finds the weights that minimize the error
        // This can be any MLP learning algorithm; a simplified variant of VSS is used here.
        // For each single weight a small step dw is added and the error is recalculated:
        // if the error decreases, w = w + dw is kept (and a second step in the same
        // direction is tried); otherwise w - dw is tried the same way; if neither
        // direction helps, the weight is restored. Per-weight step sizes live in deltaa
        // and adapt: they grow on success and shrink (x0.67) on failure. The sweep over
        // all weights is repeated numEpochs times.
        {
            DateTime dt1 = DateTime.Now; // NOTE(review): never read afterwards — timing code appears unfinished
            FillSignalTable_PTR(TrainingDataSet); // full forward pass for every training vector
            oldError = getError_ST_PTR(TrainingDataSet, 0, 0, 0, errExp, 0, true, errMeasure, delta); // baseline error
            double Error = oldError;
            double dw = dw0;
         //   StreamWriter swS2 = new StreamWriter(outputFileName + "_errors.txt");
         //   swS2.WriteLine("epoch    error    accurcy");
            
            unsafe
            {
                // p1 walks the weight table, p2 the per-weight step table; both use
                // the same flat [L, n, w] offset (iL + iN + w).
                fixed (double* p1 = &weights[0, 0, 0], p2 = &deltaa[0, 0, 0])
                {
                    for (int e = 1; e <= numEpochs; e++)
                    {
                      //  if (classification)
                      //      swS2.WriteLine(e + "    " + oldError / numVectors + "    " + accuracy / numVectors);
                      //  else
                      //      swS2.WriteLine(e + "    " + oldError / numVectors);
                     //   dw *= 0.995;
                        for (int L = 1; L < numLayers; L++)
                        {
                            int iL = L * weightsGetLength1 * weightsGetLength2; // flat offset of layer L
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                int iN = n * weightsGetLength2; // flat offset of neuron n within the layer
                                for (int w = 0; w < Layer[L - 1] + 1; w++) // +1: last index is the bias
                                {
                                    bool errorDecreased = false;
                                    dw = 0.67 * *(p2 + iL + iN + w); // start from a shrunk copy of this weight's last step
                                    double oldW = *(p1 + iL + iN + w); // saved so the weight can be fully restored
                                    // oldWeightTest must hold the pre-change value before EVERY weight
                                    // write below: rec_PTR subtracts its contribution from the cached sums.
                                    oldWeightTest = *(p1 + iL + iN + w);
                                    *(p1 + iL + iN + w) += dw; // try a step upwards
                                    rec_PTR(L, n, w); // incrementally refresh the signal tables
                                    if ((Error = getError_ST_PTR(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                                    {
                                        // +dw helped: accept it and probe one more step in the same direction
                                        oldError = Error;
                                        errorDecreased = true;
                                        *(p2 + iL + iN + w) = dw;
                                        oldWeightTest = *(p1 + iL + iN + w);
                                        *(p1 + iL + iN + w) += dw;
                                        rec_PTR(L, n, w);
                                        if ((Error = getError_ST_PTR(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                                        {
                                            oldError = Error;
                                            *(p2 + iL + iN + w) = 2 * dw; // two successes: double the stored step
                                        }
                                        else
                                        {
                                            // second step failed: roll it back, keeping w + dw
                                            oldWeightTest = *(p1 + iL + iN + w);
                                            *(p1 + iL + iN + w) -= dw;
                                            rec_PTR(L, n, w);
                                        }
                                    }
                                    else
                                    {
                                        // +dw failed: jump to w - dw (subtract 2*dw from the already increased weight)
                                        oldWeightTest = *(p1 + iL + iN + w);
                                        *(p1 + iL + iN + w) -= 2 * dw;
                                        rec_PTR(L, n, w);
                                        if ((Error = getError_ST_PTR(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                                        {
                                            // -dw helped: accept it and probe one more step downwards
                                            oldError = Error;
                                            errorDecreased = true;
                                            oldWeightTest = *(p1 + iL + iN + w);
                                            *(p1 + iL + iN + w) -= dw;
                                            *(p2 + iL + iN + w) = -dw;
                                            rec_PTR(L, n, w);
                                            if ((Error = getError_ST_PTR(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                                            {
                                                oldError = Error;
                                                *(p2 + iL + iN + w) = -2 * dw; // two successes downwards: double the (negative) step
                                            }
                                            else
                                            {
                                                // second downward step failed: roll it back, keeping w - dw
                                                oldWeightTest = *(p1 + iL + iN + w);
                                                *(p1 + iL + iN + w) += dw;
                                                rec_PTR(L, n, w);
                                            }
                                        }
                                        if (!errorDecreased)
                                        {
                                            // neither direction helped: restore the weight and shrink its step
                                            oldWeightTest = *(p1 + iL + iN + w);
                                            *(p1 + iL + iN + w) = oldW;
                                            rec_PTR(L, n, w);
                                            *(p2 + iL + iN + w) = 0.67 * dw;
                                        }
                                    }
                                }//for w
                            }//for n
                        }//for L
                    } //for e
                }
            }
           // swS2.Close();
            error = oldError; // publish the final training error
        }

        public void FillSignalTable_PTR(double[,] DataSet1)
        // Full forward pass: propagates every vector of DataSet1 through the network and
        // stores, for each (vector, layer, neuron), the weighted input sum in
        // SignalTableSumWX and the tanh output in SignalTableY. Layer 0 simply copies
        // the input columns. All tables are addressed through raw pointers with
        // pre-computed flat offsets (vect * sL12 + L * signaltableL2 + n).
        {
            //double Error = 0;
            int numVect = DataSet1.GetLength(0);
            // accuracy = 0;
            int iVDataSet, iVSignal, iLSignal, iLWeight, iNWeights;
            int sL12 = signaltableL1 * signaltableL2;  // elements per vector in the signal tables
            int wL12 = weightsLength1 * weightsLength2; // elements per layer in the weight table
            unsafe
            {
                // p1 = SumWX, p2 = Y, p3 = weights, p4 = input data
                fixed (double* p1 = &SignalTableSumWX[0, 0, 0], p2 = &SignalTableY[0, 0, 0], p3 = &weights[0, 0, 0], p4 = &DataSet1[0, 0])
                {
                    for (int vect = 0; vect < numVect; vect++)
                    {

                        iVSignal = vect * sL12;
                        iVDataSet = vect * DataSetL1;
                        // layer 0: outputs are the raw input values
                        for (int n = 0; n < Layer[0]; n++)
                            *(p2 + iVSignal + n) = *(p4 + iVDataSet + n);

                        for (int L = 1; L < numLayers; L++)
                        {
                            iLWeight = L * wL12;
                            iLSignal = L * signaltableL2;
                            int iL11Signal = (L - 1) * signaltableL2; // offset of the previous layer's outputs
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                iNWeights = n * weightsLength2;
                                *(p1 + iVSignal + iLSignal + n) = 0;
                                // weighted sum of the previous layer's outputs
                                for (int w = 0; w < Layer[L - 1]; w++)
                                    *(p1 + iVSignal + iLSignal + n) += *(p3 + iLWeight + iNWeights + w) * *(p2 + iVSignal + iL11Signal + w);

                                *(p1 + iVSignal + iLSignal + n) += *(p3 + iLWeight + iNWeights + Layer[L - 1]); // bias (stored at weight index Layer[L-1])
                                *(p2 + iVSignal + iLSignal + n) = Math.Tanh(*(p1 + iVSignal + iLSignal + n)); // activation: y = tanh(sum)
                            }
                        }
                    }
                }
            }

        }

        public void rec_PTR(int L1, int n1, int w1)
        // Incrementally recalculates Y and SumWX for every training vector after a single
        // weight change at (layer L1, neuron n1, weight w1): the changed neuron's sum is
        // patched by removing the old weight's contribution (oldWeightTest must hold the
        // pre-change value — set by the caller before every weight write) and adding the
        // new one; layer L1+1 is patched the same incremental way using that neuron's old
        // output, and all layers above L1+1 are fully recomputed.
        {

            int sL12 = signaltableL1 * signaltableL2;  // elements per vector in the signal tables
            int wL12 = weightsLength1 * weightsLength2; // elements per layer in the weight table
            int iLWeight, iLSignal, iL111Signal, iNWeights, iLWeights, iL, iL11;

            unsafe
            {
                // p1 = SumWX, p2 = Y, p3 = weights
                fixed (double* p1 = &SignalTableSumWX[0, 0, 0], p2 = &SignalTableY[0, 0, 0], p3 = &weights[0, 0, 0])
                {
                    int iN1weights = n1 * weightsLength2;
                    int iL1weights = L1 * wL12;
                    int iL1Signal = L1 * signaltableL2;
                    int iL11Signal = (L1 - 1) * signaltableL2;
                    for (int vect = 0; vect < TrainingDataSet.GetLength(0); vect++)
                    {
                        int iV = vect * sL12;
                        if (w1 == Layer[L1 - 1])
                        {
                            // the changed weight is the bias: its input is the constant 1
                            *(p1 + iV + iL1Signal + n1) += *(p3 + w1 + iN1weights + iL1weights);
                            *(p1 + iV + iL1Signal + n1) -= oldWeightTest;
                        }
                        else
                        {
                            // swap the old weight's contribution for the new one
                            *(p1 + iV + iL1Signal + n1) += *(p3 + w1 + iN1weights + iL1weights) * *(p2 + iV + iL11Signal + w1);
                            *(p1 + iV + iL1Signal + n1) -= oldWeightTest * *(p2 + iV + iL11Signal + w1);
                        }
                        double oldY = *(p2 + iV + iL1Signal + n1); // output before the change, needed to patch layer L1+1
                        *(p2 + iV + iL1Signal + n1) = Math.Tanh(*(p1 + iV + iL1Signal + n1));
                        if (L1 != numLayers - 1)
                        {
                           int L = L1 + 1; 
                           
                                iLWeights = L * wL12;
                                iL = L * signaltableL2;
                                iL11 = (L - 1) * signaltableL2;

                                // layer L1+1: patch each neuron's sum by swapping neuron n1's old
                                // output for the new one (pointer addition is commutative, so
                                // p2 + n1 + iV + iL11 addresses the same cell as p2 + iV + iL11 + n1)
                                for (int n = 0; n < Layer[L]; n++)
                                {
                                    iNWeights = n * weightsLength2;
                                    *(p1 + iV + iL + n) += *(p3 + iLWeights + iNWeights + n1) * *(p2 + n1 + iV + iL11);
                                    *(p1 + iV + iL + n) -= *(p3 + iLWeights + iNWeights + n1) * oldY;
                                    *(p2 + iV + iL + n) = Math.Tanh(*(p1 + iV + iL + n)); //y
                                }
                            
                            // layers above L1+1: every input to them may have changed,
                            // so recompute their sums from scratch
                            for (L = L1 + 2; L < numLayers; L++)
                            {
                                iLWeight = L * wL12;
                                iLSignal = L * signaltableL2;
                                iL111Signal = (L - 1) * signaltableL2;
                                for (int n = 0; n < Layer[L]; n++)
                                {
                                    iNWeights = n * weightsLength2;
                                    *(p1 + iV + iLSignal + n) = 0;
                                    for (int w = 0; w < Layer[L - 1]; w++)
                                        *(p1 + iV + iLSignal + n) += *(p3 + iLWeight + iNWeights + w) * *(p2 + iV + iL111Signal + w);

                                    *(p1 + iV + iLSignal + n) += *(p3 + iLWeight + iNWeights + Layer[L - 1]); //bias 
                                    *(p2 + iV + iLSignal + n) = Math.Tanh(*(p1 + iV + iLSignal + n));
                                }
                            }
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Computes the network error over DataSet1 from the pre-filled signal tables
        /// (no forward pass happens here; FillSignalTable_PTR / rec_PTR keep the tables
        /// current). Supports classification and regression errors, optional per-vector
        /// outlier down-weighting, and — via errMeasure — quantile/trimmed error
        /// measures plus two special modes used by the robust learning variants.
        /// </summary>
        /// <param name="DataSet1">data set to evaluate; the target value sits in column Layer[0]</param>
        /// <param name="L1">layer of the last changed weight (unused here; kept for interface compatibility)</param>
        /// <param name="n1">neuron of the last changed weight (unused here)</param>
        /// <param name="w1">index of the last changed weight (unused here)</param>
        /// <param name="errorExponent">exponent applied to each per-output error term (2 = squared error)</param>
        /// <param name="test">unused flag (kept for interface compatibility)</param>
        /// <param name="outliers">unused flag (kept for interface compatibility)</param>
        /// <param name="errMeasure">0: plain error sum; 1: delta-rank quantile error; &gt;1: trimmed mean;
        /// -1: estimate delta for robust LT learning; -2: remove outliers for ILMedS learning
        /// (side effect: may replace TrainingDataSet with a reduced copy)</param>
        /// <param name="delta">quantile rank / trim fraction; for errMeasure -2 the median-error estimate</param>
        /// <returns>the error value selected by errMeasure; also updates accuracy and numTestVect</returns>
        public double getError_ST_PTR(double[,] DataSet1, int L1, int n1, int w1, double errorExponent = 2, int test = 0, bool outliers = false, int errMeasure = 0, double delta = 0.5)
        {
            double Error = 0;
            int numVect = DataSet1.GetLength(0);
            numTestVect = numVect;
            int iVDataSet, iVSignal, iLSignal, iSgn, y;
            int sL12 = signaltableL1 * signaltableL2;   // elements per vector in the signal tables
            int wL12 = weightsLength1 * weightsLength2; // elements per layer in the weight table
            int L = numLayers - 1;                      // only the output layer is inspected
            iLSignal = L * signaltableL2;
            double maxY, maxN;
            accuracy = 0;
            double[] errorTable = new double[numVect];  // per-vector error, consumed by the errMeasure variants
            double prevError = 0;

            unsafe
            {
                // p1 = SumWX, p2 = Y, p3 = data set
                fixed (double* p1 = &SignalTableSumWX[0, 0, 0], p2 = &SignalTableY[0, 0, 0], p3 = &DataSet1[0, 0])
                {
                    if (classification && outlierErrorCoefficiant > 0)
                    {
                        // classification with per-vector outlier down-weighting
                        for (int vect = 0; vect < numVect; vect++)
                        {
                           // if (outierErrorCoefficiant > 0 && outliers)
                           // {
                                // outlier coefficient is stored next to the target column
                                outlierCoef = DataSet1[vect, numInputs + 1];
                                if (outlierCoef < oc)
                                    outlierCoef = oc;
                                outlierCoef = oc + Math.Pow((outlierCoef - oc), 2);
                          //  }

                            iVDataSet = vect * DataSetL1;
                            iVSignal = vect * sL12;
                            maxY = -1; maxN = -1;
                            iSgn = iVSignal + iLSignal;

                            for (int n = 0; n < Layer[L]; n++)
                            {
                                // track the strongest output = predicted class
                                if (*(p2 + iSgn + n) > maxY)
                                {
                                    maxY = *(p2 + iSgn + n);
                                    maxN = n;
                                }
                                y = (int)*(p3 + iVDataSet + Layer[0]) - 1; // minClassNumber;
                                //class numbering starts from 1
                                if (n == y)  //N[L][y].Y is expected to be 1;
                                {
                                    if (*(p2 + iSgn + n) < offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) - 1), errorExponent) / outlierCoef; //calculating error for classification
                                }
                                else  //N[L][y].Y is expected to be -1;
                                {
                                    if (*(p2 + iSgn + n) > -offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) + 1), errorExponent) / outlierCoef; //calculating error for classification
                                }
                            }


                            if (maxN == *(p3 + iVDataSet + Layer[0]) - 1)
                                accuracy++;

                            // per-vector error = increase of the running total
                            errorTable[vect] = Error - prevError;
                            prevError = Error;

                        }

                    }

                    else if (classification)
                    {
                        // plain classification error
                        for (int vect = 0; vect < numVect; vect++)
                        {
                          

                            iVDataSet = vect * DataSetL1;
                            iVSignal = vect * sL12;
                            maxY = -1; maxN = -1;
                            iSgn = iVSignal + iLSignal;

                            for (int n = 0; n < Layer[L]; n++)
                            {
                                // track the strongest output = predicted class
                                if (*(p2 + iSgn + n) > maxY)
                                {
                                    maxY = *(p2 + iSgn + n);
                                    maxN = n;
                                }
                                y = (int)*(p3 + iVDataSet + Layer[0]) - 1;
                                //class numbering starts from 1
                                if (n == y)  //N[L][y].Y is expected to be 1;
                                {
                                    if (*(p2 + iSgn + n) < offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) - 1), errorExponent); //calculating error for classification
                                }
                                else  //N[L][y].Y is expected to be -1;
                                {
                                    if (*(p2 + iSgn + n) > -offset)
                                        Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) + 1), errorExponent); //calculating error for classification
                                }
                            }


                            if (maxN == *(p3 + iVDataSet + Layer[0]) - 1)
                                accuracy++;

                            errorTable[vect] = Error - prevError;
                            prevError = Error;

                        }

                    }

                    else if (outlierErrorCoefficiant > 0)
                    {
                        // regression with per-vector outlier down-weighting
                        for (int vect = 0; vect < numVect; vect++)
                        {

                          //  if (outierErrorCoefficiant > 0 && outliers)
                          //  {
                                outlierCoef = DataSet1[vect, numInputs + 1];
                                if (outlierCoef < oc)
                                    outlierCoef = oc;
                                outlierCoef = oc + Math.Pow((outlierCoef - oc), 2);
                         //   }

                            iVSignal = vect * sL12;
                            iVDataSet = vect * DataSetL1;
                            iSgn = iVSignal + iLSignal;

                            for (int n = 0; n < Layer[L]; n++)
                            {
                                // NOTE(review): the output neuron is made linear here (Y := SumWX,
                                // overwriting the tanh value) — presumably intentional for regression
                                *(p2 + iSgn + n) = *(p1 + iSgn + n);
                                Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) - *(p3 + iVDataSet + Layer[0])), errorExponent) / outlierCoef; //calculating error for regression
                            }

                            errorTable[vect] = Error - prevError;
                            prevError = Error;

                        }
                    }

                    else 
                    {
                        // plain regression error
                        for (int vect = 0; vect < numVect; vect++)
                        {

                            iVSignal = vect * sL12;
                            iVDataSet = vect * DataSetL1;
                            iSgn = iVSignal + iLSignal;

                            for (int n = 0; n < Layer[L]; n++)
                            {
                                // linear output for regression (see note above)
                                *(p2 + iSgn + n) = *(p1 + iSgn + n);
                                Error += Math.Pow(Math.Abs(*(p2 + iSgn + n) - *(p3 + iVDataSet + Layer[0])), errorExponent); //calculating error for regression
                            }

                            errorTable[vect] = Error - prevError;
                            prevError = Error;

                        }
                    }

                }

            }

            if (errMeasure > 0)
            {
                //calculation of the quantile-based or trimmed error
                Array.Sort(errorTable);
                int midErrors = (int)(numVect * delta);
                if (midErrors > numVect)
                    midErrors = numVect;
                if (midErrors < 1)
                    midErrors = 1;

                //trimmed mean: the midErrors smallest errors out of numVect are averaged
                if (errMeasure > 1)
                {
                    Error = 0;
                    for (int i = 0; i < midErrors; i++)
                    {
                        Error += errorTable[i];
                    }
                    Error = Error / midErrors;
                }
                //delta-rank quantile error: delta=0.5 is the median error
                else
                {
                    //Error = (numVect % 2 != 0) ? errorTable[midErrors - 1] : (errorTable[midErrors - 1] + errorTable[midErrors]) / 2;
                    Error = errorTable[midErrors - 1];

                }
            }
            else if (errMeasure == -1)  // to estimate delta for robust LT learning
            {
                double meanError = 0;
                for (int i = 0; i < numVect; i++)
                {
                    meanError += errorTable[i];
                }
                meanError = meanError / numVect;

                //calculating mad (mean absolute deviation from the mean error)
                Error = 0;
                for (int i = 0; i < numVect; i++)
                {
                    Error += Math.Abs(errorTable[i] - meanError);
                }
                Error = Error / numVect;
                Error *= 3;

                int deltaCounter = 0;   //to count errors smaller than 3*mad

                for (int i = 0; i < numVect; i++)
                {
                    if (Math.Abs(errorTable[i]) < Error) deltaCounter++;
                }

                // returned value is the fraction of vectors whose error is below 3*mad
                Error = (double)deltaCounter / (double)numVect;
            }
            else if (errMeasure == -2)   //to remove outliers for ILMedS learning
            {
                int N = TrainingDataSet.GetLength(0);
                int d = TrainingDataSet.GetLength(1);
                int reducedIndex = 0;
                double sigma = 1.4826 * (1 + 5 / (double)(N - d - 1)) * Math.Sqrt(delta);
                sigma = 2.5 * Math.Pow(sigma, 2); //threshold

                double[,] reducedDataSet = new double[N, d];

                for (int i = 0; i < N; i++) //keep only training patterns with error < sigma
                {
                    if (errorTable[i] < sigma)
                    {
                        for (int j = 0; j < d; j++)
                        {
                            reducedDataSet[reducedIndex, j] = TrainingDataSet[i, j];
                        }
                        reducedIndex++;
                    }

                }
                if (reducedIndex > 1)
                {
                    double[,] finalDataSet = new double[reducedIndex, d];
                    // FIX: ConstrainedCopy counts multidimensional arrays in ELEMENTS, not rows;
                    // the original copied only reducedIndex doubles (a fraction of the first row),
                    // leaving the rest of finalDataSet zeroed. Copy reducedIndex * d elements.
                    Array.ConstrainedCopy(reducedDataSet, 0, finalDataSet, 0, reducedIndex * d);
                    TrainingDataSet = finalDataSet; // side effect: training set shrinks
                }


            }

            return Error;
        }






    }
}


