﻿using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.IO;
using System.Globalization;
using System.Threading;

namespace PracaInz_v0
{
    class MLP
    {

        // Network topology: numOutputs is fixed to a single output neuron.
        // numInputs is derived from the training data in the constructor.
        public int numInputs, numHidden1, numHidden2, numOutputs = 1, numLayers, numEpochs;
        // Layer[i] = number of neurons in layer i (0 = input layer).
        public int[] Layer;
        // N[layer][index] = neuron storage; jagged array sized in CreateNetwork().
        Neuron[][] N;
        // Row layout (see constructor): column 0 = target output,
        // last column = vector number, columns in between = input attributes.
        public double[,] TrainingDataSet;
        public double[,] TestDataSet;
        // One slot per training vector; presumably used by callers to route vectors
        // to child nodes when building a tree -- TODO confirm (not written in this file).
        public int[] NextLevelDataSplitter;
        public bool splitError = false;

        // Optional background thread that runs VSStraining (see VSS()).
        public Thread MLPthread;
        // Drawing-surface dimensions passed to the constructor (0 = no drawing).
        int bitmapHeight, bitmapWidth;
        // Horizontal scale for progress drawing; recomputed in the constructor.
        double graphics1Scale = 100;

        //double maxError = 0.05;
        // Network output for every training vector, filled by getError().
        public double[] nY;
        // Mean of the target outputs over the training set.
        public double avgY;
        // When true, getError() logs per-vector errors to swxyz (see VSStraining()).
        bool xyz = false;


        // A single neuron: weights (last slot = bias), per-weight VSS step
        // sizes (delta), the weighted input sum (sumWX) and the output (Y).
        public struct Neuron
        {
            public double[] weight;
            public double[] delta;
            public double sumWX, Y;
        }

        /// <summary>
        /// Builds the network from the data sets and immediately starts VSS training.
        /// </summary>
        /// <param name="TrainingDataSet">Rows = vectors; column 0 = target output, last column = vector number.</param>
        /// <param name="TestDataSet">Same layout as TrainingDataSet; used only for the final error report.</param>
        /// <param name="numEpochs">Number of VSS training epochs.</param>
        /// <param name="numHidden1">Neurons in the first hidden layer.</param>
        /// <param name="numHidden2">Neurons in the second hidden layer (0 = no second hidden layer).</param>
        /// <param name="bitmapWidth">Progress-drawing surface width (0 = no drawing).</param>
        /// <param name="bitmapHeight">Progress-drawing surface height.</param>
        /// <param name="newThread">When true, training runs on a background thread.</param>
        public MLP(double[,] TrainingDataSet, double[,] TestDataSet, int numEpochs = 10, int numHidden1 = 10, int numHidden2 = 0, int bitmapWidth = 0, int bitmapHeight = 0, bool newThread = false)
        {
            this.TrainingDataSet = TrainingDataSet;
            this.TestDataSet = TestDataSet;
            numInputs = TrainingDataSet.GetLength(1) - 2; // 0=output last=vector_number
            this.numEpochs = numEpochs;
            this.numHidden1 = numHidden1;
            this.numHidden2 = numHidden2;
            this.bitmapHeight = bitmapHeight;
            this.bitmapWidth = bitmapWidth;

            // BUGFIX: the original divided int by int, truncating the scale to a
            // whole number (and throwing DivideByZeroException for numEpochs == 0).
            int scaleDenominator = (numHidden1 + numHidden2 + 1) * numEpochs;
            graphics1Scale = scaleDenominator > 0 ? (double)bitmapWidth / scaleDenominator : 0;

            NextLevelDataSplitter = new int[TrainingDataSet.GetLength(0)];
            nY = new double[TrainingDataSet.GetLength(0)]; //neuron outputs to split the data

            // Average target output over the training set.
            avgY = 0;
            for (int v = 0; v < TrainingDataSet.GetLength(0); v++)
                avgY += TrainingDataSet[v, 0];
            avgY /= TrainingDataSet.GetLength(0);

            VSS(newThread); // starts training right away (possibly on a background thread)
        }
        //public bool newThread;

        /*
        public MLP(double[,] TrainingDataSet, double[,] TestDataSet, bool OverloadingParam = true, int numEpochs = 10, int numHidden1 = 10, int numHidden2 = 0, int bitmapWidth = 0, int bitmapHeight = 0, bool newThread = false)
        {
            this.newThread = newThread;
            this.TrainingDataSet = TrainingDataSet;
            this.TestDataSet = TestDataSet;
            numInputs = TrainingDataSet.GetLength(1) - 1;
            this.numEpochs = numEpochs;
            this.numHidden1 = numHidden1;
            this.numHidden2 = numHidden2;
            this.bitmapHeight = bitmapHeight;
            this.bitmapWidth = bitmapWidth;
            graphics1Scale = bitmapWidth / ((numHidden1 + numHidden2 + 1) * numEpochs);
            NextLevelDataSplitter = new int[TrainingDataSet.GetLength(0)];            
        }


                */


        /// <summary>
        /// Propagates every vector of <paramref name="DataSet"/> through the network
        /// and returns two per-vector error arrays: [0] = error in output space,
        /// [1] = error in atanh (pre-activation) space; both are |diff|^errorExponent.
        /// </summary>
        /// <param name="ovrPar">Unused; only distinguishes this overload.</param>
        public double[][] getError(double[,] DataSet, double errorExponent, int ovrPar)
        {
            int vectorCount = DataSet.GetLength(0);
            int attrCount = DataSet.GetLength(1) - 2;  // 0=output last=vector_number
            double[] rawError = new double[vectorCount];
            double[] atanhError = new double[vectorCount];

            for (int v = 0; v < vectorCount; v++)
            {
                // Input layer: copy the attribute values (columns 1..numInputs).
                for (int i = 0; i < Layer[0]; i++)
                    N[0][i].Y = DataSet[v, i + 1];

                // Hidden layers: weighted sum + bias, tanh activation.
                for (int L = 1; L < numLayers - 1; L++)
                    for (int i = 0; i < Layer[L]; i++)
                    {
                        double sum = 0;
                        int biasIndex = N[L][i].weight.Length - 1;
                        for (int w = 0; w < biasIndex; w++)
                            sum += N[L][i].weight[w] * N[L - 1][w].Y;
                        sum += N[L][i].weight[biasIndex]; //bias
                        N[L][i].sumWX = sum;
                        N[L][i].Y = Math.Tanh(sum);
                    }

                // Output layer: single neuron, same computation.
                int last = numLayers - 1;
                double outSum = 0;
                int outBias = N[last][0].weight.Length - 1;
                for (int w = 0; w < outBias; w++)
                    outSum += N[last][0].weight[w] * N[last - 1][w].Y;
                outSum += N[last][0].weight[outBias]; //bias
                N[last][0].sumWX = outSum;
                N[last][0].Y = Math.Tanh(outSum);

                rawError[v] = Math.Pow(Math.Abs(DataSet[v, 0] - N[last][0].Y), errorExponent);
                // Column 0 of DataSet is taken as the target output.
                atanhError[v] = Math.Pow(Math.Abs(AreaTanh(DataSet[v, 0]) - AreaTanh(N[last][0].Y)), errorExponent);
            }

            return new double[][] { rawError, atanhError };
        }

        /// <summary>
        /// Runs VSStraining, either synchronously or on a background thread
        /// (the thread handle is kept in MLPthread).
        /// </summary>
        public void VSS(bool newThread = false)
        {
            if (!newThread)
            {
                VSStraining();
                return;
            }

            MLPthread = new Thread(VSStraining) { IsBackground = true };
            MLPthread.Start();
        }

        // Initial VSS step size assigned to every per-weight delta in CreateNetwork().
        double dw0 = 0.5;

        /// <summary>
        /// Allocates the layer sizes and neurons and initializes all trainable
        /// weights randomly in (-1, +1] with the initial VSS step dw0.
        /// Topology: input / hidden1 [/ hidden2] / single output neuron.
        /// </summary>
        private void CreateNetwork()
        {
            // BUGFIX: the original tested "numHidden2 < 0" below, so for any
            // positive numHidden2 a 4-layer net was allocated but Layer[2] stayed 1
            // and Layer[3] stayed 0 -- an empty output layer that crashes later
            // with an index-out-of-range. A second hidden layer now exists exactly
            // when numHidden2 > 0 (non-positive values mean "no second layer").
            if (numHidden2 > 0)
                numLayers = 4;
            else
                numLayers = 3;

            Layer = new int[numLayers];
            Layer[0] = numInputs;
            Layer[1] = numHidden1;
            if (numHidden2 > 0)
            {
                Layer[2] = numHidden2;
                Layer[3] = 1; // single output neuron
            }
            else
                Layer[2] = 1; // single output neuron

            N = new Neuron[numLayers][];
            for (int L = 0; L < numLayers; L++)
                N[L] = new Neuron[Layer[L]];

            Random rnd = new Random();
            for (int L = 0; L < numLayers; L++)
                for (int n = 0; n < Layer[L]; n++)
                {
                    if (L == 0)
                    {
                        // The input-layer "weights" exist only to carry a priori
                        // determined importance of different attributes; fixed at 1.
                        N[L][n].weight = new double[1];
                        N[L][n].weight[0] = 1;
                    }
                    else
                    {
                        N[L][n].weight = new double[Layer[L - 1] + 1]; // +1 for bias
                        N[L][n].delta = new double[Layer[L - 1] + 1]; // +1 for bias
                        for (int w = 0; w < N[L][n].weight.Length; w++)
                        {
                            N[L][n].weight[w] = 1 - 2 * rnd.NextDouble();  // random in (-1, +1]
                            N[L][n].delta[w] = dw0;                        // initial VSS step
                        }
                    }
                }
        }





        /// <summary>
        /// Inverse hyperbolic tangent: atanh(x) = 0.5 * ln((1+x)/(1-x)).
        /// Finite only for -1 &lt; x &lt; 1; yields +/-Infinity at the endpoints.
        /// </summary>
        static double AreaTanh(double x)
        {
            double ratio = (1 + x) / (1 - x);
            return 0.5 * Math.Log(ratio);
        }
        // Per-vector error log written by getError() while xyz is true (see VSStraining()).
        StreamWriter swxyz;
        /// <summary>
        /// Propagates every vector of <paramref name="DataSet"/> through the network
        /// and returns the aggregate error as { output-space error, atanh-space error },
        /// each computed as (mean |diff|^p)^(1/p). Also records every network output
        /// in nY and, while xyz is set, logs the per-vector errors to swxyz.
        /// </summary>
        /// <param name="fillDataSplitter">Unused; kept for call-site compatibility.</param>
        public double[] getError(double[,] DataSet, double errorExponent, bool fillDataSplitter = false)
        {
            // NOTE(review): the argument is overridden here, so every caller gets
            // p = 6 regardless of what it passes -- looks deliberate but confirm.
            errorExponent = 6;

            int vectorCount = DataSet.GetLength(0);
            int attrCount = DataSet.GetLength(1) - 2;  // 0=output last=vector_number
            double sumRaw = 0, sumAtanh = 0;

            for (int v = 0; v < vectorCount; v++)
            {
                // Input layer: copy the attribute values (columns 1..numInputs).
                for (int i = 0; i < Layer[0]; i++)
                    N[0][i].Y = DataSet[v, i + 1];

                // Hidden layers: weighted sum + bias, tanh activation.
                for (int L = 1; L < numLayers - 1; L++)
                    for (int i = 0; i < Layer[L]; i++)
                    {
                        double sum = 0;
                        int biasIndex = N[L][i].weight.Length - 1;
                        for (int w = 0; w < biasIndex; w++)
                            sum += N[L][i].weight[w] * N[L - 1][w].Y;
                        sum += N[L][i].weight[biasIndex]; //bias
                        N[L][i].sumWX = sum;
                        N[L][i].Y = Math.Tanh(sum);
                    }

                // Output layer: single neuron, same computation.
                int last = numLayers - 1;
                double outSum = 0;
                int outBias = N[last][0].weight.Length - 1;
                for (int w = 0; w < outBias; w++)
                    outSum += N[last][0].weight[w] * N[last - 1][w].Y;
                outSum += N[last][0].weight[outBias]; //bias
                N[last][0].sumWX = outSum;
                N[last][0].Y = Math.Tanh(outSum);

                // Keep the raw network output so a caller can split the data on it.
                nY[v] = N[last][0].Y;

                // Column 0 of DataSet is taken as the target output.
                double e1 = Math.Pow(Math.Abs(DataSet[v, 0] - N[last][0].Y), errorExponent);
                sumRaw += e1;

                double e2 = Math.Pow(Math.Abs(AreaTanh(DataSet[v, 0]) - AreaTanh(N[last][0].Y)), errorExponent);
                sumAtanh += e2;

                if (xyz)
                    swxyz.WriteLine(e1 + " " + e2);
            }

            // Generalized mean of the per-vector errors: (sum/n)^(1/p).
            double rawError = Math.Pow(sumRaw / vectorCount, 1 / errorExponent);
            double atanhError = Math.Pow(sumAtanh / vectorCount, 1 / errorExponent);
            return new double[] { rawError, atanhError };
        }


        /// <summary>
        /// Propagates a single input vector (attribute values only, one per input
        /// neuron) through the network and returns the output neuron's value.
        /// </summary>
        public double propagateSingleVector(double[] Vector)
        {
            // Input layer: copy the attributes straight into the input neurons.
            for (int i = 0; i < Layer[0]; i++)
                N[0][i].Y = Vector[i];

            // Hidden layers: weighted sum + bias, tanh activation.
            for (int L = 1; L < numLayers - 1; L++)
                for (int i = 0; i < Layer[L]; i++)
                {
                    double sum = 0;
                    int biasIndex = N[L][i].weight.Length - 1;
                    for (int w = 0; w < biasIndex; w++)
                        sum += N[L][i].weight[w] * N[L - 1][w].Y;
                    sum += N[L][i].weight[biasIndex]; //bias
                    N[L][i].sumWX = sum;
                    N[L][i].Y = Math.Tanh(sum);
                }

            // Output layer: single neuron, same computation.
            int last = numLayers - 1;
            double outSum = 0;
            int outBias = N[last][0].weight.Length - 1;
            for (int w = 0; w < outBias; w++)
                outSum += N[last][0].weight[w] * N[last - 1][w].Y;
            outSum += N[last][0].weight[outBias]; //bias
            N[last][0].sumWX = outSum;
            N[last][0].Y = Math.Tanh(outSum);

            return N[last][0].Y;
        }

        /// <summary>
        /// MLP learning via a simplified Variable Step Search (VSS): for every
        /// weight, a step dw is tried first upward then downward; whichever
        /// direction decreases the training error is kept (and the per-weight
        /// step delta grows), otherwise the weight is restored and delta shrinks.
        /// One full sweep over all weights is one epoch; numEpochs sweeps are run.
        /// Side effects: writes "MLPtraining_.txt" (progress log),
        /// "MLPweights_.txt" (final weights) and "err1.txt" (per-vector errors),
        /// then evaluates the final training and test errors.
        /// </summary>
        public void VSStraining()
        {


            CreateNetwork();


            string file = @"MLPtraining_"  + ".txt";
            // NOTE(review): sw/sw2/swxyz are closed manually rather than with
            // using blocks; an exception between open and Close leaks the handle.
            StreamWriter sw = new StreamWriter(file);

            try
            {
                // Initial training error (index 0 = output-space error).
                double[] Errorw = getError(TrainingDataSet, 2);
                double Error = Errorw[0];




                // NOTE(review): y0, old_y and numNeurons are never used below --
                // presumably leftovers of removed progress-drawing code.
                double y0 = bitmapHeight / Error;
                int old_y = bitmapHeight;

                double oldError = Error;
                double dw = 0.32;
                int nr = 0;
                int numNeurons = numHidden1 + numHidden2 + 1;

                sw.WriteLine("e L n w       weight           error");

                for (int e = 1; e <= numEpochs; e++)
                {
                    /*
                    if (e > 4)
                    {
                        dw = 0.16;
                        if (e > 6)
                            dw = 0.08;
                        if (e > 8)
                            dw = 0.04;
                    }
                    */

                    // Sweep every weight of every neuron in every trainable layer
                    // (L starts at 1: input-layer weights are fixed).
                    for (int L = 1; L < numLayers; L++)
                    {
                        for (int n = 0; n < Layer[L]; n++)
                        {
                            nr++;
                            for (int w = 0; w < N[L][n].weight.Length; w++)
                            {
                                bool errorDecreased = false;
                                // Step derived from this weight's last successful step.
                                dw = 0.67 * N[L][n].delta[w];
                                double oldW = N[L][n].weight[w];
                                // Try increasing the weight by dw.
                                N[L][n].weight[w] += dw;
                                if ((Error = getError(TrainingDataSet, 2)[0]) < oldError)
                                {
                                    oldError = Error;
                                    errorDecreased = true;
                                    N[L][n].delta[w] = dw;
                                    // Improvement: try a second step in the same direction.
                                    N[L][n].weight[w] += dw;

                                    if ((Error = getError(TrainingDataSet, 2)[0]) < oldError)
                                    {
                                        oldError = Error;
                                        N[L][n].delta[w] = 2 * dw; // grow the step
                                    }
                                    else
                                        N[L][n].weight[w] -= dw; // second step failed: undo it
                                }
                                else
                                {
                                    // Increase failed: jump from oldW + dw to oldW - dw
                                    // and try the opposite direction.
                                    N[L][n].weight[w] -= 2 * dw;
                                    if ((Error = getError(TrainingDataSet, 2)[0]) < oldError)
                                    {
                                        oldError = Error;
                                        errorDecreased = true;
                                        // Improvement: try a second step downward.
                                        N[L][n].weight[w] -= dw;
                                        N[L][n].delta[w] = -dw;

                                        if ((Error = getError(TrainingDataSet, 2)[0]) < oldError)
                                        {
                                            oldError = Error;
                                            N[L][n].delta[w] = -2 * dw; // grow the (negative) step
                                        }
                                        else
                                            N[L][n].weight[w] += dw; // second step failed: undo it


                                    }

                                }
                                if (errorDecreased)
                                    sw.WriteLine(e + " " + L + " " + n + " " + w + " " + " " + N[L][n].weight[w] + " " + oldError);
                                else
                                {
                                    // Neither direction helped: restore the weight
                                    // and shrink the step for the next sweep.
                                    N[L][n].weight[w] = oldW;
                                    N[L][n].delta[w] = 0.67 * dw;
                                }

                            }                            
                        }
                    }
                }
            }

            finally
            {

                sw.Close();


                // Dump the final weights of all trainable layers.
                StreamWriter sw2 = new StreamWriter("MLPweights_"  + ".txt");
                for (int L = 1; L < numLayers; L++)
                    for (int n = 0; n < Layer[L]; n++)
                        for (int w = 0; w < N[L][n].weight.Length; w++)
                            sw2.WriteLine(L + " " + n + " " + w + " " + N[L][n].weight[w]);

                sw2.Close();

                // Log per-vector errors of the trained net: the xyz flag gates
                // the swxyz logging inside getError().
                swxyz = new StreamWriter(@"err1.txt");
                xyz = true;
                double[] TrnMSE = getError(TrainingDataSet, 2, true);
                swxyz.Close();
                xyz = false;
                double[] TestMSE = getError(TestDataSet, 2);
                string strError = "TrnMSE = " + TrnMSE[0].ToString() + "  TestMSE = " + TestMSE[0].ToString();
                string strError2 = "TrnMSE = " + TrnMSE[1].ToString() + "  TestMSE = " + TestMSE[1].ToString();



               // MessageBox.Show(strError + "  " + strError2);
                /*
                if (bitmapWidth > 0)
                {
                    //  System.Diagnostics.Process prc = new System.Diagnostics.Process();
                    //  prc.StartInfo.FileName = "notepad.exe";
                    //  prc.StartInfo.Arguments = file;
                    //  prc.Start();
                    MessageBox.Show(strError);
                }
                */

            }
        }
    }
}

