﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.IO;

namespace MLP
{
    /// <summary>
    /// Restricted Boltzmann Machine trained with Contrastive Divergence (CD-k).
    /// Weights and layer activations are either generated pseudo-randomly
    /// (useRandom == true) or read from text files in tmpDir that were produced
    /// by an external reference implementation, so both runs can be compared
    /// step by step. See the algorithm description in the trailing comment.
    /// </summary>
    class ContrastiveDivergence
    {
        // Space-separated per-epoch reconstruction errors (for display) and the
        // directory holding the externally generated weight/activation files.
        public string errorString = "", tmpDir=@"D:\DeepLearning\Software\PythonApplication3\PythonApplication3\tmpFiles7\";
        // false -> read weights (and activations in Activ) from tmpDir files.
        // NOTE(review): the constructor calls CreateRBM() immediately, so setting
        // this field after construction cannot affect weight initialization.
        public bool useRandom = false;
        double[,] DataSet, W, SiSj0, SiSjn, CDpos, CDneg, CD;
        double[,,] CDposVect, CDnegVect;  // per-data-vector CD statistics
        double[] Svis, Shid, Error;       // visible states (Si), hidden states (Sj)
        double sigma = 0.1, A = 1.0, alpha = 0.1, TotalError = 0, moment = 0.9, sig = 0.2, epsW = 0.5, epsA = 0.5;
        int numHiddenNeurons, numVisibleNeurons, numVectors, numAttributes, numEpochs = 10, ksteps = 5;
        Random R;

        /// <summary>
        /// Builds the RBM from the data set (rows = data vectors, columns =
        /// attributes) and, when type == "C", immediately runs continuous-valued
        /// CD training.
        /// </summary>
        public ContrastiveDivergence(double[,] DataSet, string type)
        {
            this.DataSet = DataSet;
            numVectors = DataSet.GetLength(0);
            numAttributes = DataSet.GetLength(1);
            CreateRBM();
            if (type == "C")
                CalcContCD();
        }

        /// <summary>
        /// Allocates all state/weight matrices and initializes W either with small
        /// random values or from tmpDir\W.txt (hidden index runs horizontally,
        /// visible index vertically; one extra row/column for the bias weights).
        /// </summary>
        void CreateRBM()
        {
            numHiddenNeurons = 3;
            numVisibleNeurons = numAttributes;
            // +1 in both dimensions leaves room for the bias "neurons" that stay at 1.0.
            W = new double[numVisibleNeurons + 1, numHiddenNeurons + 1];
            CDpos = new double[numVisibleNeurons, numHiddenNeurons];
            CDneg = new double[numVisibleNeurons, numHiddenNeurons];
            CDposVect = new double[numVectors, numVisibleNeurons, numHiddenNeurons];
            CDnegVect = new double[numVectors, numVisibleNeurons, numHiddenNeurons];
            CD = new double[numVisibleNeurons, numHiddenNeurons];
            Svis = new double[numVisibleNeurons]; //Si
            Shid = new double[numHiddenNeurons];  //Sj
            Error = new double[numVisibleNeurons];
            SiSj0 = new double[numVisibleNeurons, numHiddenNeurons];
            SiSjn = new double[numVisibleNeurons, numHiddenNeurons];

            if (useRandom)
            {
                R = new Random();
                // Small symmetric weights in (-0.1, 0.1].
                // NOTE(review): only the non-bias part of W is randomized; the
                // extra bias row/column stays 0 — confirm this is intended.
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                        W[i, j] = 0.2 * (0.5 - R.NextDouble());
            }
            else
            {
                // File layout: horizontal = hid+1 columns, vertical = vis+1 rows.
                int i = 0;
                // FIX: 'using' guarantees the reader is closed even if parsing
                // throws (the original only called sr.Close() on the success path).
                using (StreamReader sr = new StreamReader(tmpDir + "W.txt"))
                {
                    string theLine;
                    while ((theLine = sr.ReadLine()) != null)
                    {
                        if (theLine.Trim().Length > 2)  // skip blank/near-empty lines
                        {
                            string[] S = theLine.Split(new string[] { " " }, StringSplitOptions.RemoveEmptyEntries);
                            for (int j = 0; j < numHiddenNeurons + 1; j++)
                                W[i, j] = Double.Parse(S[j], System.Globalization.CultureInfo.InvariantCulture);
                            i++;
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Continuous-valued CD training loop: for each epoch, run CD-k on every
        /// data vector, average the positive/negative statistics over the data
        /// set, form CD = &lt;Si.Sj&gt;0 - &lt;Si.Sj&gt;n = CDpos - CDneg and update
        /// W" = W + alpha*CD. Appends each epoch's normalized reconstruction
        /// error to errorString.
        /// </summary>
        void CalcContCD()
        {
            errorString = "";
            // NOTE(review): epochs run 1..numEpochs-1 (9 epochs, not 10) —
            // presumably the epoch number must match the per-epoch directory
            // names of the externally generated activation files; confirm.
            for (int e = 1; e < numEpochs; e++)
            {
                TotalError = 0;

                // Reset per-vector accumulators for this epoch.
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                        for (int v = 0; v < numVectors; v++)
                        {
                            CDposVect[v, i, j] = 0; //wpos
                            CDnegVect[v, i, j] = 0; //wneg
                        }

                for (int v = 0; v < numVectors; v++)
                    OneContVector(v, e);

                // At the end compute average of CDpos and CDneg by dividing them
                // by the number of data points.
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                    {
                        CDpos[i, j] = 0;
                        CDneg[i, j] = 0;
                    }

                for (int v = 0; v < numVectors; v++)
                    for (int i = 0; i < numVisibleNeurons; i++)
                        for (int j = 0; j < numHiddenNeurons; j++)
                        {
                            CDpos[i, j] += CDposVect[v, i, j];
                            // FIX: was "CDposVect[v, i, j]" (copy/paste bug) —
                            // the negative phase summed the positive statistics,
                            // so CDpos == CDneg and CD was always ~0.
                            CDneg[i, j] += CDnegVect[v, i, j];
                        }

                // Compute CD = < Si.Sj >0 - < Si.Sj >n = CDpos - CDneg
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                    {
                        CDpos[i, j] /= numVectors;
                        CDneg[i, j] /= numVectors;
                        CD[i, j] = CDpos[i, j] - CDneg[i, j];
                    }

                // Update weights and biases W" = W + alpha*CD (biases are just
                // weights to neurons that stay always 1.0).
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                    {
                        W[i, j] += alpha * CD[i, j];
                    }

                //self.dW = self.dW*self.moment + self.epsW * ((wpos-wneg)/size(self.dat) - self.cost*self.W)
                //self.W = self.W + self.dW
                //self.Ahid = self.Ahid + self.epsA*(apos-aneg)/(size(self.dat)*self.Ahid*self.Ahid)

                // "Error function": mean squared difference between data and
                // reconstruction. FIX: guard against values that format with
                // fewer than 9 characters — the original unconditional
                // Substring(0, 9) threw ArgumentOutOfRangeException for e.g. "0".
                string err = (TotalError / (numVectors * numVisibleNeurons * numAttributes)).ToString();
                errorString += (err.Length > 9 ? err.Substring(0, 9) : err) + "  ";
            }
        }

        /// <summary>
        /// Computes the activation of one layer ("hid" -> Shid from Svis,
        /// anything else -> Svis from Shid). With useRandom the state is the
        /// logistic F(x) = 1/(1+exp(-A*x)) of the weighted input plus noise;
        /// otherwise the activations are read verbatim from a per-epoch file
        /// (iteration == -1 selects the "__" file written before the k-step loop).
        /// </summary>
        void Activ(string who_Layer, int epoch, int vector, int iteration)
        {
            if (useRandom)
            {
                if (who_Layer == "hid")
                {
                    for (int j = 0; j < numHiddenNeurons; j++)
                    {
                        Shid[j] = 0;
                        // NOTE(review): the noise term (sum of 4 uniforms,
                        // approximating a normal) is added per input rather than
                        // once per neuron — confirm against the reference.
                        for (int i = 0; i < numVisibleNeurons; i++)
                            Shid[j] += Svis[i] * W[i, j] + sigma * (2 - 1.72 * (R.NextDouble() + R.NextDouble() + R.NextDouble() + R.NextDouble()));

                        double F = 1.0 / (1.0 + Math.Exp(-A * Shid[j]));
                        Shid[j] = F;
                    }
                }
                else
                {
                    for (int i = 0; i < numVisibleNeurons; i++)
                    {
                        Svis[i] = 0;
                        for (int j = 0; j < numHiddenNeurons; j++)
                            Svis[i] += Shid[j] * W[i, j] + sigma * (2 - 1.72 * (R.NextDouble() + R.NextDouble() + R.NextDouble() + R.NextDouble()));

                        double F = 1.0 / (1.0 + Math.Exp(-A * Svis[i]));
                        Svis[i] = F;
                    }
                }
            }
            else
            {
                // One activation value per line, file named e.g. "<epoch>\Shid<vector>_<iter>.txt".
                string fileName = tmpDir + epoch.ToString() + "\\S" + who_Layer + vector.ToString() + "_" + iteration.ToString() + ".txt";
                if (iteration == -1)
                    fileName = tmpDir + epoch.ToString() + "\\S" + who_Layer + vector.ToString() + "__.txt";
                string[] S = File.ReadAllLines(fileName);

                if (who_Layer == "hid")
                {
                    for (int j = 0; j < numHiddenNeurons; j++)
                    {
                        Shid[j] = Convert.ToDouble(S[j]);
                    }
                }
                else
                {
                    for (int i = 0; i < numVisibleNeurons; i++)
                    {
                        Svis[i] = Convert.ToDouble(S[i]);
                    }
                }
            }
        }

        /// <summary>
        /// Runs one CD-k pass for data vector v in epoch e: clamp the visible
        /// layer to the data, compute the positive statistics (Si.Sj)0, then run
        /// ksteps reconstruction steps to obtain (Si.Sj)n, storing both into the
        /// per-vector accumulators. Also adds this vector's squared
        /// reconstruction error to TotalError.
        /// </summary>
        private void OneContVector(int v, int e)
        {
            // 1-2: use values of this data point to set the state of the visible neurons Si.
            for (int i = 0; i < numVisibleNeurons; i++)  //without output
                Svis[i] = DataSet[v, i];

            // 3: compute Sj for each hidden neuron from the visible states:
            // Sj = F( Sum( Si x Wij + N(0,sigma)) ), F = 1/(1+exp(-A*Sj)).
            Activ("hid", e, v, -1);

            // 4: positive statistics (Si.Sj)0 — just the product of the current
            // visible and hidden states (no averaging yet).
            for (int i = 0; i < numVisibleNeurons; i++)
                for (int j = 0; j < numHiddenNeurons; j++)
                {
                    SiSj0[i, j] = Svis[i] * Shid[j];
                }

            for (int n = 0; n < ksteps; n++)
            {
                // 5: reconstruct the visible layer from the hidden states.
                Activ("vis", e, v, n);

                // 6: recompute the hidden states from the reconstruction.
                Activ("hid", e, v, n);

                // 7: negative statistics (Si.Sj)n from the reconstructed states.
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                        SiSjn[i, j] = Svis[i] * Shid[j];

                // NOTE(review): this zeroing is redundant — the same cells are
                // unconditionally overwritten with '=' just below, so only the
                // final k-step survives. Kept as-is to preserve behavior.
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                    {
                        CDposVect[v, i, j] = 0;
                        CDnegVect[v, i, j] = 0;
                    }

                // NOTE(review): CDpos/CDneg here still hold the PREVIOUS epoch's
                // averaged statistics (they are reset only after all vectors are
                // processed) — verify this offset is intended and not meant to be
                // a plain accumulation "CDposVect += SiSj0".
                for (int i = 0; i < numVisibleNeurons; i++)
                    for (int j = 0; j < numHiddenNeurons; j++)
                    {
                        CDposVect[v, i, j] = CDpos[i, j] + SiSj0[i, j];
                        CDnegVect[v, i, j] = CDneg[i, j] + SiSjn[i, j];
                    }
            }

            // 8: squared difference between the data and its final reconstruction.
            for (int i = 0; i < numVisibleNeurons; i++)
                TotalError += Math.Pow((DataSet[v, i] - Svis[i]), 2);
        }
    }
}

/*
 http://imonad.com/rbm/restricted-boltzmann-machine/

 Restricted Boltzmann Machine is a stochastic neural network (that is, a network of neurons where each neuron 
 has some random behavior when activated). It consists of one layer of visible units (neurons) and one layer 
 of hidden units. Units in each layer have no connections between them and are connected to all other units 
 in the other layer (fig.1). Connections between neurons are bidirectional and symmetric.
 This means that information flows in both directions during the training and during the usage of the network 
 and that weights are the same in both directions.


RBM Network works in the following way:
First the network is trained by using some data set and setting the neurons on the visible layer to match data 
points in this data set. After the network is trained we can use it on new unknown data to make a classification 
of the data (this is known as unsupervised learning).

Learning Algorithm

Training a RBM is performed by algorithm known as "Contrastive Divergence Learning".

More info on Contrastive Divergence
Let W be the matrix of IxJ (I - number of visible neurons, J - number of hidden neurons) that represents
weights between neurons. Each neuron input is provided by connections from all neurons in other layer.
Current neuron state S is formed by multiplication of each input by weight, summation over all inputs 
and application of this sum as an argument of a nonlinear sigmoidal function:
Sj = F( Sum( Si x Wij + N(0,sigma)) ) - here Si are all neurons in given layer plus one bias neuron 
that stays constantly set at 1.0
N(0, sigma) is a random number drawn from a normal distribution with mean 0.0 and standard deviation sigma (I use sigma=0.2).
This nonlinear function in my case is:
F = lo + (hi - lo)/(1.0 + exp(-A*Sj))

Where lo and hi are the lower and higher bound of input values (in my case 0,1), so it
becomes: F = 1.0/(1.0 + exp(-A*Sj))
A - is some parameter that is determined during the learning process.

Contrastive divergence is a value (actually a matrix of values) that is computed and that is used 
to adjust the values of the W matrix. Changing W incrementally leads to training of the W values.

Let W0 be the initial matrix of weights that are set to some random small values. I use N(0, 0.1) for this.
Let CD = <Si.Sj>0 - <Si.Sj>n  - contrastive divergence
Then on each step (epoch) W is updated to a new value W":
W" = W + alpha*CD
Here alpha is some small step - the "learning rate". There exist more complex ways for the W update that involve
some "momentum" and "cost" of update, to avoid the W values becoming very large.
Contrastive Divergence explanation

There seems to be a lot of confusion about what exactly Contrastive Divergence means and how to implement it.
I have spent a lot of time trying to understand it.
First of all, CD as shown in the formula above is a matrix of size IxJ. So this formula has to be
computed for each combination of I and J.
<...> is an average over each data point in the data set.

Si.Sj is just a multiplication of current activation (state) of neuron I and neuron J (obviously :) ). 
Where Si is the state of a visible neuron, and Sj is the state of a hidden neuron.
Indexes after <...> mean that the average is taken after the 0th or N-th reconstruction step is performed.
fig. 2 Training Restricted Boltzmann Machine

fig. 2 Training Restricted Boltzmann Machine

How is the reconstruction performed?

    get one data point from data set.
    use values of this data point to set state of visible neurons Si
    compute Sj for each hidden neuron based on formula above and states of visible neurons Si
    now Si and Sj values can be used to compute (Si.Sj)0 - here (...) means just values not average
    on visible neurons compute Si using the Sj computed in step3. This is known as ".reconstruction"
    compute state of hidden neurons Sj again using Si from 5 step.
    now use Si and Sj to compute (Si.Sj)1 (fig.3)
    repeating steps 5, 6 and 7 multiple times, compute (Si.Sj)n. Where n is a small number and can increase 
    with the learning steps to achieve better accuracy.

The algorithm as a whole is:

    For each epoch do:
        For each data point in data set do:
            CDpos =0, CDneg=0 (matrices)
            perform steps 1...8
            accumulate CDpos = CDpos + (Si.Sj)0
            accumulate CDneg = CDneg + (Si.Sj)n
        At the end compute average of CDpos and CDneg by dividing them by number of data points.
        Compute CD = < Si.Sj >0 - < Si.Sj >n = CDpos - CDneg
        Update weights and biases W" = W + alpha*CD (biases are just weights to neurons that stay always 1.0)
        Compute some "error function" like sum of squared difference between Si in 1) and Si in 5)
        e.g between data and reconstruction.
    repeat for the next epoch until error is small or some fixed number of epoch.

The value of parameter A for visible units stays constant, and for hidden units it is adjusted
by the same CD calculation, but instead of the formula above the following formula is used:
CD = (<Sj.Sj>0 - <Sj.Sj>n)/(Aj.Aj)

In my code i use n=20 initially and gradually increase it to 50.
Most of the steps in the algorithm can be performed by some simple matrix multiplication.
*/