﻿using System;
using System.IO;
using System.Drawing;
using System.Collections.Generic;

namespace MLP
{
    partial class Network
    {



        public void VSS_ST()
        // MLP network learning algorithm: finds the weights that minimize the error.
        // Simplified version of VSS: a small value dw is added to each single weight
        // and the error is recalculated; if the error decreases, w = w + dw is kept
        // (and a doubled step is tried), otherwise dw is subtracted from the weight
        // and the error is calculated again; if it decreases, w = w - dw, otherwise
        // w remains unchanged. The sweep over all weights is repeated for numEpochs
        // epochs. Progress is drawn into a PNG and optionally logged to text files.
        {
            FillSignalTable(TrainingDataSet);
            oldError = getError_ST(TrainingDataSet, 0, 0, 0, errExp, 0, true, errMeasure, delta);
            double Error = oldError;
            double dw = dw0;   // initial step; per-weight steps come from deltaa[] below

            bool useBitmap = true;
            if (!useBitmap)
                bitmapWidth = 1;   // width 1 disables all drawing below

            Bitmap bitmap = new Bitmap(bitmapWidth, bitmapHeight);
            Pen redPen = new Pen(Color.Red, 3);
            Pen grayPen = new Pen(Color.Gray, 3);
            Pen bluePen = new Pen(Color.Blue, 3);
            Graphics graphics = Graphics.FromImage(bitmap);
            SolidBrush whiteBrush = new SolidBrush(Color.White);
            // FillRectangle takes (brush, x, y, width, height); the original call
            // passed (0, bitmapWidth, 0, bitmapHeight), i.e. a zero-width rectangle
            // at y = bitmapWidth, so the white background was never painted.
            graphics.FillRectangle(whiteBrush, 0, 0, bitmapWidth, bitmapHeight);

            // Total weight count: each of Layer[L] neurons has Layer[L-1] inputs plus
            // one bias, matching the triple loop below. The original formula
            // (Layer[L]*Layer[L-1] + 1) undercounted the biases, so the x-scale of
            // the progress plot overflowed the bitmap width.
            int numWeights = 0, nw = 0;
            for (int L = 1; L < numLayers; L++)
                numWeights += Layer[L] * (Layer[L - 1] + 1);

            double y0 = (double)bitmapHeight / oldError;   // vertical scale: error -> pixels

            double yb0 = y0;                                // vertical scale for the accuracy curve
            if (!classification)
                y0 /= 2;
            int x, y, old_x = 1, old_y = bitmapHeight, old_yb = 0, yb;
            if (bitmapWidth > 1)
            {
                // Top and bottom borders plus one vertical grid line per epoch.
                graphics.DrawLine(grayPen, 0, 0, bitmapWidth, 0);
                graphics.DrawLine(grayPen, 0, bitmapHeight - 1, bitmapWidth, bitmapHeight - 1);
                x = (int)Math.Round((double)((bitmapWidth * nw) / (numWeights * numEpochs)));
                graphics.DrawLine(grayPen, x, 0, x, bitmapHeight);
                for (int e = 1; e <= numEpochs; e++)
                {
                    nw = numWeights * e;
                    x = (int)Math.Round((double)((bitmapWidth * nw) / (numWeights * numEpochs)));
                    graphics.DrawLine(grayPen, x, 0, x, bitmapHeight);
                }
            }
            nw = 0;

            // NOTE(review): these writers are fields and are not closed in this method;
            // they are flushed at the end below -- confirm they are closed elsewhere.
            weightFile = new StreamWriter(outputFileName + "_weights.txt");
            signalFile = new StreamWriter(outputFileName + "_signals.txt");
            errorFile = new StreamWriter(outputFileName + "_errors.txt");

            if (saveLearning)
            {
                // Header + epoch-0 state for the error log.
                errorFile.Write("Network: ");
                for (int i = 0; i < numLayers - 1; i++)
                    errorFile.Write(Layer[i] + "-");
                errorFile.WriteLine(Layer[numLayers - 1]);

                if (classification)
                {
                    errorFile.WriteLine("epoch L n w error accuracy");
                    errorFile.WriteLine("0 0 0 0 " + oldError / (Layer[numLayers - 1] * numVectors) + " " + accuracy / numVectors);
                }
                else
                {
                    errorFile.WriteLine("epoch L n w error");
                    errorFile.WriteLine("0 0 0 0 " + oldError / numVectors);
                }

                // Header + epoch-0 weights for the weight log (one column per L/n/w).
                weightFile.Write("Network: ");
                for (int i = 0; i < numLayers - 1; i++)
                    weightFile.Write(Layer[i] + "-");
                weightFile.Write(Layer[numLayers - 1]);
                weightFile.WriteLine("   L/n/w");

                weightFile.Write("epoch");
                for (int L = 1; L < numLayers; L++)
                    for (int n = 0; n < Layer[L]; n++)
                        for (int w = 0; w < Layer[L - 1] + 1; w++)
                            weightFile.Write(" " + L + "/" + n + "/" + w);
                weightFile.WriteLine();

                weightFile.Write("0 ");
                for (int L = 1; L < numLayers; L++)
                    for (int n = 0; n < Layer[L]; n++)
                        for (int w = 0; w < Layer[L - 1] + 1; w++)
                            weightFile.Write(" " + weights[L, n, w]);
                weightFile.WriteLine();

                // Header + epoch-0 neuron outputs for the signal log (one column per L/n).
                signalFile.Write("Network: ");
                for (int i = 0; i < numLayers - 1; i++)
                    signalFile.Write(Layer[i] + "-");
                signalFile.Write(Layer[numLayers - 1]);
                signalFile.WriteLine("   L/n");

                signalFile.WriteLine("epoch=0");
                signalFile.Write("vect");
                for (int L = 1; L < numLayers; L++)
                    for (int n = 0; n < Layer[L]; n++)
                        signalFile.Write(" " + L + "/" + n);
                signalFile.WriteLine();

                for (int v = 0; v < numVectors; v++)
                {
                    signalFile.Write(v);
                    for (int L = 1; L < numLayers; L++)
                        for (int n = 0; n < Layer[L]; n++)
                            signalFile.Write(" " + SignalTableY[v, L, n]);
                    signalFile.WriteLine();
                }
            }

            for (int e = 1; e <= numEpochs; e++)
            {
                // NOTE(review): this decay is overwritten by the per-weight step
                // dw = 0.67 * deltaa[L, n, w] below, so it only affects dw0 once;
                // confirm whether the decay is still intended.
                dw *= 0.995;
                for (int L = 1; L < numLayers; L++)
                {
                    for (int n = 0; n < Layer[L]; n++)
                    {
                        for (int w = 0; w < Layer[L - 1] + 1; w++)
                        {
                            nw++;
                            bool errorDecreased = false;
                            dw = 0.67 * deltaa[L, n, w];   // adaptive per-weight step
                            double oldW = weights[L, n, w];
                            // Try w + dw first; rec() incrementally updates the signal tables.
                            oldWeightTest = weights[L, n, w];
                            weights[L, n, w] += dw;
                            rec(L, n, w);
                            if ((Error = getError_ST(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                            {
                                oldError = Error;
                                errorDecreased = true;
                                deltaa[L, n, w] = dw;
                                // Improvement: try a doubled step in the same direction.
                                oldWeightTest = weights[L, n, w];
                                weights[L, n, w] += dw;
                                rec(L, n, w);
                                if ((Error = getError_ST(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                                {
                                    oldError = Error;
                                    deltaa[L, n, w] = 2 * dw;
                                }
                                else
                                {
                                    // Second step hurt: back off to w + dw.
                                    oldWeightTest = weights[L, n, w];
                                    weights[L, n, w] -= dw;
                                    rec(L, n, w);
                                }
                            }
                            else
                            {
                                // w + dw hurt: try the opposite direction, w - dw.
                                oldWeightTest = weights[L, n, w];
                                weights[L, n, w] -= 2 * dw;
                                rec(L, n, w);
                                if ((Error = getError_ST(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                                {
                                    oldError = Error;
                                    errorDecreased = true;
                                    oldWeightTest = weights[L, n, w];
                                    weights[L, n, w] -= dw;
                                    deltaa[L, n, w] = -dw;
                                    rec(L, n, w);
                                    if ((Error = getError_ST(TrainingDataSet, L, n, w, errExp, 0, true, errMeasure, delta)) < oldError)
                                    {
                                        oldError = Error;
                                        deltaa[L, n, w] = -2 * dw;
                                    }
                                    else
                                    {
                                        oldWeightTest = weights[L, n, w];
                                        weights[L, n, w] += dw;
                                        rec(L, n, w);
                                    }
                                }
                                if (!errorDecreased)
                                {
                                    // Neither direction helped: restore the weight, shrink the step.
                                    oldWeightTest = weights[L, n, w];
                                    weights[L, n, w] = oldW;
                                    rec(L, n, w);
                                    deltaa[L, n, w] = 0.67 * dw;
                                }
                                // NOTE(review): improvements found in the +dw branch above are
                                // never logged because this else-if sits inside the outer else;
                                // confirm whether that is intended.
                                else if (saveLearning)
                                {
                                    if (classification)
                                        errorFile.WriteLine(e + " " + L + " " + n + " " + w + " " + oldError / (Layer[numLayers - 1] * numVectors) + " " + accuracy / numVectors);
                                    else
                                        errorFile.WriteLine(e + " " + L + " " + n + " " + w + " " + oldError / numVectors);
                                }
                            }

                            if (bitmapWidth > 1)
                            {
                                // Error curve (red) and, for classification, accuracy curve (blue).
                                y = (int)(oldError * y0);
                                x = (int)Math.Round((double)((bitmapWidth * nw) / (numWeights * numEpochs)));

                                graphics.DrawLine(redPen, old_x - 1, bitmapHeight - old_y, x, bitmapHeight - y);
                                if (classification)
                                {
                                    yb = (int)(accuracy * yb0);
                                    graphics.DrawLine(bluePen, old_x - 1, bitmapHeight - old_yb, x, bitmapHeight - yb);
                                    old_yb = yb;
                                }

                                old_y = y;
                                old_x = x;
                            }
                        }//for w
                    }//for n
                }//for L

                if (saveLearning)
                {
                    // End-of-epoch snapshot of all weights and all neuron outputs.
                    weightFile.Write(e);
                    for (int L = 1; L < numLayers; L++)
                        for (int n = 0; n < Layer[L]; n++)
                            for (int w = 0; w < Layer[L - 1] + 1; w++)
                                weightFile.Write(" " + weights[L, n, w]);
                    weightFile.WriteLine();

                    signalFile.WriteLine("epoch=" + e);
                    signalFile.Write("vect");
                    for (int L = 1; L < numLayers; L++)
                        for (int n = 0; n < Layer[L]; n++)
                            signalFile.Write(" " + L + "/" + n);
                    signalFile.WriteLine();

                    for (int v = 0; v < numVectors; v++)
                    {
                        signalFile.Write(v);
                        for (int L = 1; L < numLayers; L++)
                            for (int n = 0; n < Layer[L]; n++)
                                signalFile.Write(" " + SignalTableY[v, L, n]);
                        signalFile.WriteLine();
                    }
                }
            } //for e

            if (bitmapWidth > 1)
                bitmap.Save(outputFileName + ".png");

            // Push buffered log output to disk even if the writers are closed later
            // (or never) by other code.
            weightFile.Flush();
            signalFile.Flush();
            errorFile.Flush();

            // Release the locally-created GDI+ resources.
            graphics.Dispose();
            redPen.Dispose();
            grayPen.Dispose();
            bluePen.Dispose();
            whiteBrush.Dispose();
            bitmap.Dispose();

            error = oldError;
        }

        public void FillSignalTable(double[,] DataSet1)
        // Initial full forward pass: propagates every training vector through the
        // network, caching each neuron's weighted input sum (SignalTableSumWX) and
        // tanh activation (SignalTableY) for later incremental updates.
        {
            int vectorCount = DataSet1.GetLength(0);

            for (int v = 0; v < vectorCount; v++)
            {
                // Input layer: neuron outputs are just the raw feature values.
                for (int i = 0; i < Layer[0]; i++)
                    SignalTableY[v, 0, i] = DataSet1[v, i];

                // Hidden and output layers; the last weight of each neuron is its bias.
                for (int layer = 1; layer < numLayers; layer++)
                {
                    int prevCount = Layer[layer - 1];
                    for (int neuron = 0; neuron < Layer[layer]; neuron++)
                    {
                        SignalTableSumWX[v, layer, neuron] = 0;
                        for (int w = 0; w < prevCount; w++)
                            SignalTableSumWX[v, layer, neuron] += weights[layer, neuron, w] * SignalTableY[v, layer - 1, w];

                        SignalTableSumWX[v, layer, neuron] += weights[layer, neuron, prevCount]; // bias term
                        SignalTableY[v, layer, neuron] = Math.Tanh(SignalTableSumWX[v, layer, neuron]);
                    }
                }
            }
        }

        public void rec(int L1, int n1, int w1)
        // Incrementally recalculates SumWX and Y for every training vector after a
        // single weight change: only the neuron whose weight changed (L1, n1) and the
        // layer directly above are updated in place; layers L1+2 and higher are fully
        // recomputed. oldWeightTest holds the weight's value before the change.
        {
            if (sumMedian <= 0)
            {
                for (int vect = 0; vect < TrainingDataSet.GetLength(0); vect++)
                {
                    // Swap the old weight's contribution for the new one.
                    if (w1 == Layer[L1 - 1])   // bias weight: no input-signal factor
                    {
                        SignalTableSumWX[vect, L1, n1] += weights[L1, n1, w1];
                        SignalTableSumWX[vect, L1, n1] -= oldWeightTest;
                    }
                    else
                    {
                        SignalTableSumWX[vect, L1, n1] += weights[L1, n1, w1] * SignalTableY[vect, L1 - 1, w1];
                        SignalTableSumWX[vect, L1, n1] -= oldWeightTest * SignalTableY[vect, L1 - 1, w1];
                    }
                    double oldY = SignalTableY[vect, L1, n1];
                    SignalTableY[vect, L1, n1] = Math.Tanh(SignalTableSumWX[vect, L1, n1]);

                    if (L1 != numLayers - 1)
                    {
                        // Layer directly above: only the changed neuron's output differs,
                        // so swap its old contribution for the new one.
                        int L = L1 + 1;

                        for (int n = 0; n < Layer[L]; n++)
                        {
                            SignalTableSumWX[vect, L, n] += weights[L, n, n1] * SignalTableY[vect, L - 1, n1];
                            SignalTableSumWX[vect, L, n] -= weights[L, n, n1] * oldY;
                            SignalTableY[vect, L, n] = Math.Tanh(SignalTableSumWX[vect, L, n]); //y
                        }

                        // Higher layers: every input may have changed, recompute fully.
                        for (L = L1 + 2; L < numLayers; L++)
                        {
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                SignalTableSumWX[vect, L, n] = 0;
                                for (int w = 0; w < Layer[L - 1]; w++)
                                    SignalTableSumWX[vect, L, n] += weights[L, n, w] * SignalTableY[vect, L - 1, w];

                                SignalTableSumWX[vect, L, n] += weights[L, n, Layer[L - 1]]; //bias
                                SignalTableY[vect, L, n] = Math.Tanh(SignalTableSumWX[vect, L, n]);//y
                            }
                        }
                    }
                }
            }
            else
            {
                // sumMedian > 0: the neuron activation blends the plain weighted sum
                // with the median of the neuron's individual w*x terms.
                double medianWX, sumMedWX;
                int mid;
                double[] WX;

                for (int vect = 0; vect < TrainingDataSet.GetLength(0); vect++)
                {
                    if (w1 == Layer[L1 - 1])
                    {
                        SignalTableSumWX[vect, L1, n1] += weights[L1, n1, w1];
                        SignalTableSumWX[vect, L1, n1] -= oldWeightTest;
                    }
                    else
                    {
                        SignalTableSumWX[vect, L1, n1] += weights[L1, n1, w1] * SignalTableY[vect, L1 - 1, w1];
                        SignalTableSumWX[vect, L1, n1] -= oldWeightTest * SignalTableY[vect, L1 - 1, w1];
                    }
                    double oldY = SignalTableY[vect, L1, n1];

                    WX = new double[Layer[L1 - 1] + 1];  //number of weights in the current neuron
                    for (int w = 0; w < Layer[L1 - 1]; w++)
                    {
                        // NOTE(review): uses N[L1-1][w].Y (per-neuron state left by the
                        // last vector run elsewhere) rather than SignalTableY[vect, L1-1, w];
                        // confirm this is intended.
                        WX[w] = weights[L1, n1, w] * N[L1 - 1][w].Y;
                    }
                    WX[Layer[L1 - 1]] = weights[L1, n1, Layer[L1 - 1]]; //bias

                    Array.Sort(WX);
                    mid = WX.Length / 2;
                    // Even-length median uses the two middle elements WX[mid-1], WX[mid].
                    // The original (WX[mid] + WX[mid+1]) / 2 picked the wrong pair and
                    // threw IndexOutOfRangeException for 2-element arrays.
                    medianWX = (WX.Length % 2 != 0) ? WX[mid] : (WX[mid - 1] + WX[mid]) / 2;
                    sumMedWX = (1 - sumMedian) * SignalTableSumWX[vect, L1, n1] + sumMedian * medianWX;
                    SignalTableY[vect, L1, n1] = Math.Tanh(sumMedWX);

                    if (L1 != numLayers - 1)
                    {
                        int L = L1 + 1;

                        for (int n = 0; n < Layer[L]; n++)
                        {
                            SignalTableSumWX[vect, L, n] += weights[L, n, n1] * SignalTableY[vect, L - 1, n1];
                            SignalTableSumWX[vect, L, n] -= weights[L, n, n1] * oldY;

                            WX = new double[Layer[L - 1] + 1];  //number of weights in the current neuron
                            for (int w = 0; w < Layer[L - 1]; w++)
                            {
                                WX[w] = weights[L, n, w] * N[L - 1][w].Y;
                            }
                            WX[Layer[L - 1]] = weights[L, n, Layer[L - 1]]; //bias

                            Array.Sort(WX);
                            mid = WX.Length / 2;
                            medianWX = (WX.Length % 2 != 0) ? WX[mid] : (WX[mid - 1] + WX[mid]) / 2;
                            sumMedWX = (1 - sumMedian) * SignalTableSumWX[vect, L, n] + sumMedian * medianWX;
                            SignalTableY[vect, L, n] = Math.Tanh(sumMedWX);
                        }

                        for (L = L1 + 2; L < numLayers; L++)
                        {
                            for (int n = 0; n < Layer[L]; n++)
                            {
                                SignalTableSumWX[vect, L, n] = 0;
                                for (int w = 0; w < Layer[L - 1]; w++)
                                    SignalTableSumWX[vect, L, n] += weights[L, n, w] * SignalTableY[vect, L - 1, w];

                                SignalTableSumWX[vect, L, n] += weights[L, n, Layer[L - 1]]; //bias

                                WX = new double[Layer[L - 1] + 1];  //number of weights in the current neuron
                                // Fixed: the original looped to Layer[L1-1] and indexed
                                // weights[L, n1, ...] -- inconsistent with the parallel
                                // block above (bound Layer[L-1], neuron index n).
                                for (int w = 0; w < Layer[L - 1]; w++)
                                {
                                    WX[w] = weights[L, n, w] * N[L - 1][w].Y;
                                }
                                WX[Layer[L - 1]] = weights[L, n, Layer[L - 1]]; //bias

                                Array.Sort(WX);
                                mid = WX.Length / 2;
                                medianWX = (WX.Length % 2 != 0) ? WX[mid] : (WX[mid - 1] + WX[mid]) / 2;
                                sumMedWX = (1 - sumMedian) * SignalTableSumWX[vect, L, n] + sumMedian * medianWX;
                                SignalTableY[vect, L, n] = Math.Tanh(sumMedWX);
                            }
                        }
                    }
                }
            }
        }

        public double getError_ST(double[,] DataSet1, int L1, int n1, int w1, double errorExponent = 2, int test = 0, bool outliers = false, int errMeasure = 0, double delta = 0.5)
        // Computes the network error from the cached SignalTable values.
        // Classification: per-class hinge-like error with an `offset` dead zone,
        // optionally divided by a per-vector outlier coefficient; also updates
        // `accuracy`. Regression: |y - target|^errorExponent with linear outputs.
        // errMeasure > 0 selects a trimmed-mean (>1) or quantile (==1) error,
        // errMeasure == -1 estimates delta for robust LT learning,
        // errMeasure == -2 strips outlier patterns from TrainingDataSet (ILMedS).
        {
            double Error = 0;
            int numVect = DataSet1.GetLength(0);
            accuracy = 0;
            double[] errorTable = new double[numVect];   // per-vector error contributions
            double prevError = 0;

            if (classification && outlierErrorCoefficiant > 0)
            {
                int L = numLayers - 1;
                double maxY, maxN;
                for (int vect = 0; vect < numVect; vect++)
                {
                    maxY = -1; maxN = -1;

                    // Per-vector outlier coefficient is stored in the data set,
                    // floored at oc and then squared-amplified above it.
                    outlierCoef = DataSet1[vect, numInputs + 1];
                    if (outlierCoef < oc)
                        outlierCoef = oc;
                    outlierCoef = oc + Math.Pow((outlierCoef - oc), 2);

                    for (int n = 0; n < Layer[L]; n++)
                    {
                        if (SignalTableY[vect, L, n] > maxY)
                        {
                            maxY = SignalTableY[vect, L, n];
                            maxN = n;
                        }
                        int y = (int)DataSet1[vect, Layer[0]] - 1; // class numbering starts from 1
                        if (n == y)  //N[L][y].Y is expected to be 1;
                        {
                            if (SignalTableY[vect, L, n] < offset)
                                Error += Math.Pow(Math.Abs(SignalTableY[vect, L, n] - 1), errorExponent) / outlierCoef;
                        }
                        else  //N[L][y].Y is expected to be -1;
                        {
                            if (SignalTableY[vect, L, n] > -offset)
                                Error += Math.Pow(Math.Abs(SignalTableY[vect, L, n] + 1), errorExponent) / outlierCoef;
                        }
                    }

                    // Correct classification = highest output belongs to the true class.
                    if (maxN == DataSet1[vect, Layer[0]] - 1)
                        accuracy++;

                    errorTable[vect] = Error - prevError;
                    prevError = Error;
                }
            }

            else if (classification)
            {
                // Same as above but without the outlier weighting.
                int L = numLayers - 1;
                double maxY, maxN;
                for (int vect = 0; vect < numVect; vect++)
                {
                    maxY = -1; maxN = -1;

                    for (int n = 0; n < Layer[L]; n++)
                    {
                        if (SignalTableY[vect, L, n] > maxY)
                        {
                            maxY = SignalTableY[vect, L, n];
                            maxN = n;
                        }
                        int y = (int)DataSet1[vect, Layer[0]] - 1; // class numbering starts from 1
                        if (n == y)  //N[L][y].Y is expected to be 1;
                        {
                            if (SignalTableY[vect, L, n] < offset)
                                Error += Math.Pow(Math.Abs(SignalTableY[vect, L, n] - 1), errorExponent);
                        }
                        else  //N[L][y].Y is expected to be -1;
                        {
                            if (SignalTableY[vect, L, n] > -offset)
                                Error += Math.Pow(Math.Abs(SignalTableY[vect, L, n] + 1), errorExponent);
                        }
                    }

                    if (maxN == DataSet1[vect, Layer[0]] - 1)
                        accuracy++;

                    errorTable[vect] = Error - prevError;
                    prevError = Error;
                }
            }

            else if (!(outlierErrorCoefficiant > 0))
            {
                // Regression without outlier weighting; output layer is linear
                // (Y is overwritten with the raw weighted sum).
                int L = numLayers - 1;
                for (int vect = 0; vect < numVect; vect++)
                {
                    for (int n = 0; n < Layer[L]; n++)
                    {
                        SignalTableY[vect, L, n] = SignalTableSumWX[vect, L, n];
                        Error += Math.Pow(Math.Abs(SignalTableY[vect, L, n] - DataSet1[vect, Layer[0]]), errorExponent);
                    }

                    errorTable[vect] = Error - prevError;
                    prevError = Error;
                }
            }

            else
            {
                // Regression with per-vector outlier weighting.
                int L = numLayers - 1;
                for (int vect = 0; vect < numVect; vect++)
                {
                    outlierCoef = DataSet1[vect, numInputs + 1];
                    if (outlierCoef < oc)
                        outlierCoef = oc;
                    outlierCoef = oc + Math.Pow((outlierCoef - oc), 2);

                    for (int n = 0; n < Layer[L]; n++)
                    {
                        SignalTableY[vect, L, n] = SignalTableSumWX[vect, L, n];
                        Error += Math.Pow(Math.Abs(SignalTableY[vect, L, n] - DataSet1[vect, Layer[0]]), errorExponent) / outlierCoef;
                    }

                    errorTable[vect] = Error - prevError;
                    prevError = Error;
                }
            }

            if (errMeasure > 0)
            {
                // Quantile-based or trimmed error over the per-vector contributions.
                Array.Sort(errorTable);
                int midErrors = (int)(numVect * delta);
                if (midErrors > numVect)
                    midErrors = numVect;
                if (midErrors < 1)
                    midErrors = 1;

                // Trimmed mean: average of the midErrors smallest per-vector errors.
                if (errMeasure > 1)
                {
                    Error = 0;
                    for (int i = 0; i < midErrors; i++)
                    {
                        Error += errorTable[i];
                    }
                    Error = Error / midErrors;
                }
                // delta-rank quantile error: delta = 0.5 is the median error.
                else
                {
                    Error = errorTable[midErrors - 1];
                }
            }
            else if (errMeasure == -1)  // to estimate delta for robust LT learning
            {
                double meanError = 0;
                for (int i = 0; i < numVect; i++)
                {
                    meanError += errorTable[i];
                }
                meanError = meanError / numVect;

                // Mean absolute deviation, scaled by 3 as the inlier threshold.
                Error = 0;
                for (int i = 0; i < numVect; i++)
                {
                    Error += Math.Abs(errorTable[i] - meanError);
                }
                Error = Error / numVect;
                Error *= 3;

                int deltaCounter = 0;   // counts errors smaller than 3*mad

                for (int i = 0; i < numVect; i++)
                {
                    if (Math.Abs(errorTable[i]) < Error) deltaCounter++;
                }

                // Returned value here is the inlier fraction, not an error.
                Error = (double)deltaCounter / (double)numVect;
            }
            else if (errMeasure == -2)   // to remove outliers for ILMedS learning
            {
                int N = TrainingDataSet.GetLength(0);
                int d = TrainingDataSet.GetLength(1);
                int reducedIndex = 0;
                double sigma = 1.4826 * (1 + 5 / (double)(N - d - 1)) * Math.Sqrt(delta);
                sigma = 2.5 * Math.Pow(sigma, 2); //threshold
                double[,] reducedDataSet;
                reducedDataSet = new double[N, d];

                for (int i = 0; i < N; i++) // keep only patterns with error < sigma
                {
                    if (errorTable[i] < sigma)
                    {
                        for (int j = 0; j < d; j++)
                        {
                            reducedDataSet[reducedIndex, j] = TrainingDataSet[i, j];
                        }
                        reducedIndex++;
                    }
                }
                if (reducedIndex > 1)
                {
                    double[,] finalDataSet = new double[reducedIndex, d];
                    // For multidimensional arrays the length argument is a flat element
                    // count; the original passed reducedIndex (rows), copying only
                    // reducedIndex doubles instead of reducedIndex full rows.
                    Array.ConstrainedCopy(reducedDataSet, 0, finalDataSet, 0, reducedIndex * d);
                    TrainingDataSet = finalDataSet;
                }
            }

            return Error;
        }





        public double getErrorST0(double[,] DataSet1, double errorExponent = 2, int test = 0, bool outliers = true, int errMeasure = 0, double delta = 0.5)
        // Forward pass over DataSet1: every training vector is propagated through the network
        // and the error measure selected by errMeasure is returned:
        //   errMeasure == 0 : plain accumulated error (sum over vectors and output neurons)
        //   errMeasure == 1 : delta-rank quantile of the per-vector errors (delta = 0.5 -> median)
        //   errMeasure  > 1 : trimmed mean over the delta fraction of smallest per-vector errors
        //   errMeasure == -1: fraction of per-vector errors below 3 * mean absolute deviation
        //                     (used to estimate delta for robust LT learning)
        //   errMeasure == -2: removes outlier patterns from TrainingDataSet (ILMedS learning)
        //                     and returns the plain accumulated error
        // Side effects: updates the fields numTestVect and accuracy (classification hit count);
        // for errMeasure == -2 it replaces TrainingDataSet with the reduced set.
        {
            //  if (productUnits)          
            //      return getErrorPU(DataSet1, L1, n1, w1, errorExponent, test, outliers, errMeasure, delta);

            double Error = 0, prevError = 0, nY;
            int numVect = DataSet1.GetLength(0);
            numTestVect = numVect;
            accuracy = 0;
            // per-vector error contributions, needed by the quantile/trimmed/robust measures
            double[] errorTable = new double[numVect];

            for (int vect = 0; vect < numVect; vect++)
            {
                // load the input layer with the current training vector
                for (int n = 0; n < Layer[0]; n++)
                    N[0][n].Y = DataSet1[vect, n];

                double maxY = -1, maxN = -1;   // winning output neuron (classification)
                for (int L = 1; L < numLayers; L++)
                {


                    for (int n = 0; n < Layer[L]; n++)
                    {
                        N[L][n].sumWX = 0;
                       

                        if (sumMedian > 0)
                        {
                            // robust aggregation: blend the plain weighted sum with the median
                            // of the individual w*x products, weighted by sumMedian
                            double[] WX = new double[Layer[L - 1] + 1];  //number of weights in the current neuron
                            for (int w = 0; w < Layer[L - 1]; w++)
                            {
                                N[L][n].sumWX += weights[L, n, w] * N[L - 1][w].Y;
                                WX[w] = weights[L, n, w] * N[L - 1][w].Y;
                            }

                            N[L][n].sumWX += weights[L, n, Layer[L - 1]]; //bias  
                            WX[Layer[L - 1]] = weights[L, n, Layer[L - 1]]; //bias

                            Array.Sort(WX);
                            int mid = WX.Length / 2;
                            // FIX: for an even-length array the median is the mean of the two middle
                            // elements WX[mid - 1] and WX[mid]; the previous (WX[mid] + WX[mid + 1]) / 2
                            // was shifted by one and read past the end of the array when WX.Length == 2
                            double medianWX = (WX.Length % 2 != 0) ? WX[mid] : (WX[mid - 1] + WX[mid]) / 2;
                            nY = (1 - sumMedian) * N[L][n].sumWX + sumMedian * medianWX;
                        }
                        else
                        {
                            // standard neuron: weighted sum of the previous layer's outputs plus bias
                            for (int w = 0; w < Layer[L - 1]; w++)
                            {
                                N[L][n].sumWX += weights[L, n, w] * N[L - 1][w].Y;                              
                            }
                            N[L][n].sumWX += weights[L, n, Layer[L - 1]]; //bias
                            nY = N[L][n].sumWX;
                        }


                        N[L][n].Y = Math.Tanh(nY);

                        if (L == numLayers - 1)   // output layer: accumulate the error
                        {

                            if (outlierErrorCoefficiant > 0)
                            {
                                // per-pattern outlier coefficient taken from the data set;
                                // NOTE(review): assumes column numInputs + 1 holds it - confirm
                                outlierCoef = DataSet1[vect, numInputs + 1];

                                if (outlierCoef < oc)
                                    outlierCoef = oc;

                                outlierCoef = oc + Math.Pow((outlierCoef - oc), 2);
                            }


                            if (classification)
                            {
                                // track the strongest output for the accuracy count below
                                if (N[L][n].Y > maxY)
                                {
                                    maxY = N[L][n].Y;
                                    maxN = n;
                                }

                                int y = (int)DataSet1[vect, Layer[0]] - 1;
                                //class numbering starts from 1
                                if (n == y)  //N[L][y].Y is expected to be 1;
                                {
                                    if (N[L][n].Y < offset)
                                        Error += Math.Pow(Math.Abs(N[L][n].Y - 1), errorExponent) / outlierCoef;
                                }
                                else  //N[L][y].Y is expected to be -1;
                                {
                                    if (N[L][n].Y > -offset)
                                        Error += Math.Pow(Math.Abs(N[L][n].Y + 1), errorExponent) / outlierCoef;
                                }

                               // if (test > 0)
                                //    sw.Write("  " + Math.Round(N[L][n].Y, 4));

                            }
                            else
                            {
                                // regression: optionally bypass the tanh on the output neuron
                                if (outputNeuronLinearForRegression)
                                    N[L][n].Y = nY;

                                Error += Math.Pow(Math.Abs(N[L][n].Y - DataSet1[vect, Layer[0]]), errorExponent) / outlierCoef;
                            }
                        }

                    }

                }
                if (maxN == DataSet1[vect, Layer[0]] - 1) // - lastColumnContainsOutliers)
                    accuracy++;

                // to calculate quantile-based error: store only this vector's contribution
                errorTable[vect] = Error - prevError;
                prevError = Error;

            }
            
            if (errMeasure > 0)
            {
                //calculation of the quantile-based or trimmed error
                Array.Sort(errorTable);
                int midErrors = (int)(numVect * delta);   // rank of the delta quantile, clamped to [1, numVect]
                if (midErrors > numVect)
                    midErrors = numVect;
                if (midErrors < 1)
                    midErrors = 1;

                //trimmed mean: delta smallest errors out of numVect are averaged
                if (errMeasure > 1)
                {
                    Error = 0;
                    for (int i = 0; i < midErrors; i++)
                    {
                        Error += errorTable[i];
                    }
                    Error = Error / midErrors;
                }
                //delta-rank quantile error: delta=0.5 is median error
                else
                {
                    //Error = (numVect % 2 != 0) ? errorTable[midErrors - 1] : (errorTable[midErrors - 1] + errorTable[midErrors]) / 2;
                    Error = errorTable[midErrors - 1];

                }
            }
            else if (errMeasure == -1)  // to estimate delta for robust LT learning
            {
                double meanError = 0;
                for (int i = 0; i < numVect; i++)
                {
                    meanError += errorTable[i];
                }
                meanError = meanError / numVect;

                //calculating mad (mean absolute deviation from the mean error)
                Error = 0;
                for (int i = 0; i < numVect; i++)
                {
                    Error += Math.Abs(errorTable[i] - meanError);
                }
                Error = Error / numVect;
                Error *= 3;   // outlier threshold = 3 * mad

                int deltaCounter = 0;   //to count errors smaller than 3*mad

                for (int i = 0; i < numVect; i++)
                {
                    if (Math.Abs(errorTable[i]) < Error) deltaCounter++;
                }

                // returned value is the fraction of non-outlier patterns, usable as delta
                Error = (double)deltaCounter / (double)numVect;
            }
            else if (errMeasure == -2)   //to remove outliers for ILMedS learning
            {
                // NOTE(review): errorTable is indexed by DataSet1 rows but read here per
                // TrainingDataSet row - assumes DataSet1 is TrainingDataSet in this mode; confirm
                int N = TrainingDataSet.GetLength(0);
                int d = TrainingDataSet.GetLength(1);
                int reducedIndex = 0;
                double sigma = 1.4826 * (1 + 5 / (double)(N - d - 1)) * Math.Sqrt(delta);
                sigma = 2.5 * Math.Pow(sigma, 2); //threshold
                double[,] reducedDataSet;
                reducedDataSet = new double[N, d];


                for (int i = 0; i < N; i++) //keep only the training patterns with error<sigma
                {
                    if (errorTable[i] < sigma)
                    {
                        for (int j = 0; j < d; j++)
                        {
                            reducedDataSet[reducedIndex, j] = TrainingDataSet[i, j];
                        }
                        reducedIndex++;
                    }

                }
                if (reducedIndex > 1)
                {
                    double[,] finalDataSet = new double[reducedIndex, d];
                    // FIX: ConstrainedCopy counts individual elements (2-D arrays are copied
                    // linearly, row by row), so copying reducedIndex rows of width d needs a
                    // length of reducedIndex * d; the previous call copied only the first
                    // reducedIndex elements of row 0 and left the rest of finalDataSet zeroed
                    Array.ConstrainedCopy(reducedDataSet, 0, finalDataSet, 0, reducedIndex * d);
                    TrainingDataSet = finalDataSet;
                }


            }
            
            return Error;
        }

    }
}









