﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using AdvUtils;
using System.Threading.Tasks;

namespace LMDecoder
{
    /// <summary>
    /// A single network unit: the forward activation and the error signal
    /// back-propagated into it during training.
    /// </summary>
    public struct neuron
    {
        public double ac;		//actual value stored in neuron
        public double er;		//error value in neuron, used by learning algorithm
    }

    /// <summary>
    /// A single connection between two neurons; wraps the weight so weight
    /// matrices can be allocated as flat arrays of this struct.
    /// </summary>
    public struct synapse
    {
        public double weight;	//weight of synapse
    }

    /// <summary>
    /// One vocabulary entry: the surface form of the word, its corpus count,
    /// and the output-layer class it is assigned to by initNet().
    /// Comparison is by descending count, so sorting a list of entries puts
    /// the most frequent words first.
    /// </summary>
    public class vocab_word : IComparable
    {
        public long cn;
        public string word;

        public int class_index;

        public vocab_word(string s)
        {
            word = s;
        }

        /// <summary>Descending order: a larger <see cref="cn"/> sorts earlier.</summary>
        int IComparable.CompareTo(object obj) => ((vocab_word)obj).cn.CompareTo(cn);
    }

    public class RnnLM
    {
        // Primes used to hash n-gram histories into indices of syn_d
        // (the direct / maximum-entropy style connection table).
        static uint[] PRIMES ={108641969, 116049371, 125925907, 133333309, 145678979, 175308587, 197530793, 234567803, 251851741, 264197411, 330864029, 399999781,
407407183, 459258997, 479012069, 545678687, 560493491, 607407037, 629629243, 656789717, 716048933, 718518067, 725925469, 733332871, 753085943, 755555077,
782715551, 790122953, 812345159, 814814293, 893826581, 923456189, 940740127, 953085797, 985184539, 990122807};

        // Cached number of entries in PRIMES.
        int PRIMES_SIZE = PRIMES.Length;

        // Maximum n-gram order supported by the direct connections / history buffer.
        public const int MAX_NGRAM_ORDER = 20;

        // NOTE(review): seeded from the millisecond component only (0-999);
        // a weak seed, presumably acceptable for weight initialisation.
        Random rand = new Random(DateTime.Now.Millisecond);

        // Shared options for the Parallel.For calls in matrixXvector.
        ParallelOptions parallelOption = new ParallelOptions();
        public double lambda;       // set via setLambda; usage not visible in this chunk -- TODO confirm (interpolation weight?)
        double gradient_cutoff;     // clip threshold for back-propagated errors (applied in matrixXvector)

        public double dynamic;      // set via setDynamic; usage not visible in this chunk -- TODO confirm

        public double alpha;            // current learning rate
        public double starting_alpha;   // learning rate at the start of training
        public int alpha_divide;        // presumably flags that learning-rate halving has begun -- verify against trainer
        public double logp, llogp;      // current / last log-probability (llogp starts very low, see ctor)
        public double min_improvement;  // minimum relative improvement expected between iterations

        public int train_cur_pos;       // current position in the training data
        public int one_iter;            // nonzero: run a single training iteration
        public int save_step;           // model-save interval -- units not visible here, TODO confirm

        double beta;                    // regularisation coefficient

        public int[] history;           // recent word ids consumed by the direct n-gram features (-1 = OOV)
        int gen;                        // nonzero: generation mode (computeNet stops after class softmax)

        public int alpha_set;           // nonzero if the learning rate was set explicitly
        /// <summary>
        /// Creates a model with the default hyper-parameters: a 30-unit hidden
        /// layer, 100 output classes, no compression layer, no direct
        /// connections, and BPTT disabled (block size 10).
        /// </summary>
        public RnnLM()
        {
            // Learning-rate schedule and regularisation defaults.
            alpha = 0.1;
            alpha_set = 0;
            alpha_divide = 0;
            beta = 0.0000001;
            lambda = 0.75;
            gradient_cutoff = 15;
            dynamic = 0;
            min_improvement = 1.003f;

            // Log-probability bookkeeping: llogp starts extremely negative so
            // the first iteration always registers as an improvement.
            logp = 0;
            llogp = -100000000;

            // Topology defaults (layer0/layer2 sizes are derived in initNet).
            vocab_size = 0;
            layer1_size = 30;
            class_size = 100;
            old_classes = 0;
            direct_size = 0;
            direct_order = 0;

            // Back-propagation-through-time defaults.
            bptt = 0;
            bptt_block = 10;

            // Misc training state.
            train_cur_pos = 0;
            one_iter = 0;
            gen = 0;
            history = new int[MAX_NGRAM_ORDER];
        }


        /// <summary>Uniform random double drawn from [min, max).</summary>
        double random(double min, double max)
        {
            double span = max - min;
            return min + span * rand.NextDouble();
        }

        // ------------------------------------------------------------------
        // Configuration setters. Sizes and orders must be set before
        // initNet() is called; rate/schedule values before training starts.
        // ------------------------------------------------------------------
        public void setClassSize(int newSize) { class_size = newSize; }
        public void setOldClasses(int newVal) { old_classes = newVal; }
        public void setLambda(double newLambda) { lambda = newLambda; }
        public void setGradientCutoff(double newGradient) { gradient_cutoff = newGradient; }
        public void setDynamic(double newD) { dynamic = newD; }
        public void setGen(int newGen) { gen = newGen; }

        public void setLearningRate(double newAlpha) { alpha = newAlpha; }
        public void setRegularization(double newBeta) { beta = newBeta; }
        public void setMinImprovement(double newMinImprovement) { min_improvement = newMinImprovement; }
        public void setHiddenLayerSize(int newsize) { layer1_size = newsize; }
        public void setCompressionLayerSize(int newsize) { layerc_size = newsize; }
        public void setDirectSize(long newsize) { direct_size = newsize; }
        public void setDirectOrder(int newsize) { direct_order = newsize; }
        public void setBPTT(int newval) { bptt = newval; }
        public void setBPTTBlock(int newval) { bptt_block = newval; }
        public void setSaveStep(int newAnti) { save_step = newAnti; }
        public void setOneIter(int newOneIter) { one_iter = newOneIter; }

        // ----- Vocabulary -----
        public List<vocab_word> vocab = new List<vocab_word>();
        public Dictionary<string, int> vocab_hash = new Dictionary<string, int>();  // word -> index into vocab
        public int vocab_size = 0;

        // ----- Layer sizes (layer0_size and layer2_size are derived in initNet) -----
        public int layer0_size;
        public int layer1_size;
        public int layerc_size;
        public int layer2_size;

        public neuron[] neu0;		//neurons in input layer
        public neuron[] neu1;		//neurons in hidden layer
        neuron[] neuc;		//neurons in compression layer
        public neuron[] neu2;		//neurons in output layer


        public synapse[] syn0;		//weights between input and hidden layer
        public synapse[] syn1;		//weights between hidden and output layer (or hidden and compression if compression>0)
        public synapse[] sync;		//weights between hidden and compression layer

        public double[] syn_d;		//direct parameters between input and output layer (similar to Maximum Entropy model parameters)


        public long direct_size;	//number of entries in syn_d
        public int direct_order;	//n-gram order of the direct features (<= MAX_NGRAM_ORDER)



        //backup used in training:
        neuron[] neu0b;
        neuron[] neu1b;
        neuron[] neucb;
        neuron[] neu2b;

        synapse[] syn0b;
        synapse[] syn1b;
        synapse[] syncb;
        //double[] syn_db;

        //backup used in n-best rescoring:
        neuron[] neu1b2;


        // ----- Back-propagation-through-time state (allocated in initNet when bptt > 0) -----
        public int bptt;			//number of unrolled time steps
        public int bptt_block;		//additional block size for truncated BPTT
        public int[] bptt_history;	//word ids for the unrolled steps (-1 = unset/OOV)
        public neuron[] bptt_hidden;	//hidden-layer snapshots for the unrolled steps
        synapse[] bptt_syn0;		//accumulated input->hidden weight updates

        /// <summary>
        /// Snapshots all unit states and weight matrices into the backup
        /// arrays (neu*b / syn*b) so training can roll back to the last
        /// validated state. Assumes initNet() has allocated every array.
        /// </summary>
        public void saveWeights()
        {
            // neuron and synapse are value types, so Array.Copy duplicates
            // the full element (both .ac and .er for neurons).
            Array.Copy(neu0, neu0b, layer0_size);
            Array.Copy(neu1, neu1b, layer1_size);
            Array.Copy(neuc, neucb, layerc_size);
            Array.Copy(neu2, neu2b, layer2_size);

            Array.Copy(syn0, syn0b, layer0_size * layer1_size);

            if (layerc_size > 0)
            {
                // With a compression layer: syn1 is hidden->compression and
                // sync is compression->output.
                Array.Copy(syn1, syn1b, layer1_size * layerc_size);
                Array.Copy(sync, syncb, layerc_size * layer2_size);
            }
            else
            {
                // No compression layer: syn1 is hidden->output.
                Array.Copy(syn1, syn1b, layer1_size * layer2_size);
            }
        }

        public int class_size;    // number of classes in the factorised output layer
        int[][] class_words;      // for each class, the word indices assigned to it
        int[] class_cn;           // number of words currently stored per class
        int[] class_max_cn;       // allocated capacity of class_words[i]
        public int old_classes;   // nonzero: classes by raw frequency; zero: by sqrt-frequency (see initNet)

        /// <summary>
        /// Allocates and initialises the whole network: derives the input and
        /// output layer sizes, allocates neurons, weight matrices and their
        /// training backups, randomises the weights, prepares BPTT buffers,
        /// and assigns every vocabulary word to an output class based on its
        /// frequency. Requires vocab / vocab_size to be populated first.
        /// </summary>
        public void initNet()
        {
            int a, b, cl;

            // Input layer = one-hot word section + copy of the previous hidden layer.
            layer0_size = vocab_size + layer1_size;
            // Output layer = word section + class section (class-factorised softmax).
            layer2_size = vocab_size + class_size;

            neu0 = new neuron[layer0_size];
            neu1 = new neuron[layer1_size];
            neuc = new neuron[layerc_size];
            neu2 = new neuron[layer2_size];

            syn0 = new synapse[layer0_size * layer1_size];
            if (layerc_size == 0)
            {
                syn1 = new synapse[layer1_size * layer2_size];
            }
            else
            {
                // With a compression layer: syn1 is hidden->compression,
                // sync is compression->output.
                syn1 = new synapse[layer1_size * layerc_size];
                sync = new synapse[layerc_size * layer2_size];
            }

            // Direct (hash-based, maximum-entropy style) connection parameters.
            syn_d = new double[direct_size];


            // Backup copies used by saveWeights()/training rollback.
            neu0b = new neuron[layer0_size];
            neu1b = new neuron[layer1_size];
            neucb = new neuron[layerc_size];
            neu1b2 = new neuron[layer1_size];
            neu2b = new neuron[layer2_size];

            syn0b = new synapse[layer0_size * layer1_size];
            if (layerc_size == 0)
            {
                syn1b = new synapse[layer1_size * layer2_size];
            }
            else
            {
                syn1b = new synapse[layer1_size * layerc_size];
                syncb = new synapse[layerc_size * layer2_size];
            }



            // Explicitly zero all activations/errors (new arrays are already
            // zeroed in C#; kept for clarity and safe re-initialisation).
            for (a = 0; a < layer0_size; a++)
            {
                neu0[a].ac = 0;
                neu0[a].er = 0;
            }

            for (a = 0; a < layer1_size; a++)
            {
                neu1[a].ac = 0;
                neu1[a].er = 0;
            }

            for (a = 0; a < layerc_size; a++)
            {
                neuc[a].ac = 0;
                neuc[a].er = 0;
            }

            for (a = 0; a < layer2_size; a++)
            {
                neu2[a].ac = 0;
                neu2[a].er = 0;
            }

            // Initialise weights as the sum of three uniforms in (-0.1, 0.1):
            // approximately gaussian with mean 0.
            for (b = 0; b < layer1_size; b++)
            {
                for (a = 0; a < layer0_size; a++)
                {
                    syn0[a + b * layer0_size].weight = random(-0.1, 0.1) + random(-0.1, 0.1) + random(-0.1, 0.1);
                }
            }

            if (layerc_size > 0)
            {
                for (b = 0; b < layerc_size; b++)
                {
                    for (a = 0; a < layer1_size; a++)
                    {
                        syn1[a + b * layer1_size].weight = random(-0.1, 0.1) + random(-0.1, 0.1) + random(-0.1, 0.1);
                    }
                }

                for (b = 0; b < layer2_size; b++)
                {
                    for (a = 0; a < layerc_size; a++)
                    {
                        sync[a + b * layerc_size].weight = random(-0.1, 0.1) + random(-0.1, 0.1) + random(-0.1, 0.1);
                    }
                }
            }
            else
            {
                for (b = 0; b < layer2_size; b++)
                {
                    for (a = 0; a < layer1_size; a++)
                    {
                        syn1[a + b * layer1_size].weight = random(-0.1, 0.1) + random(-0.1, 0.1) + random(-0.1, 0.1);
                    }
                }
            }

            // Direct-connection parameters start at zero.
            for (long aa = 0; aa < direct_size; aa++)
            {
                syn_d[aa] = 0;
            }

            if (bptt > 0)
            {
                // Word-id history for the unrolled time steps; -1 marks "no word yet".
                bptt_history = new int[bptt + bptt_block + 10];

                for (a = 0; a < bptt + bptt_block; a++)
                {
                    bptt_history[a] = -1;
                }
                //

                // Hidden-layer snapshots for each unrolled step.
                bptt_hidden = new neuron[(bptt + bptt_block + 1) * layer1_size];

                for (a = 0; a < (bptt + bptt_block) * layer1_size; a++)
                {
                    bptt_hidden[a].ac = 0;
                    bptt_hidden[a].er = 0;
                }
                //

                // Accumulator for input->hidden weight updates during BPTT.
                bptt_syn0 = new synapse[layer0_size * layer1_size];

            }

            saveWeights();

            double df, dd;
            int i;

            df = 0;
            dd = 0;
            a = 0;
            b = 0;

            // Assign vocabulary words (assumed sorted by descending count) to
            // classes so that each class covers roughly an equal share of the
            // probability mass.
            if (old_classes != 0)
            {  	
                // old classes: share of raw unigram probability per class.
                long b_total = 0;
                for (i = 0; i < vocab_size; i++)
                {
                    b_total += vocab[i].cn;
                }

                for (i = 0; i < vocab_size; i++)
                {
                    df += vocab[i].cn / (double)b_total;
                    if (df > 1) df = 1;
                    if (df > (a + 1) / (double)class_size)
                    {
                        vocab[i].class_index = a;
                        // Advance to the next class, but never past the last one.
                        if (a < class_size - 1) a++;
                    }
                    else
                    {
                        vocab[i].class_index = a;
                    }
                }
            }
            else
            {			
                // new classes: share of sqrt(unigram probability), which
                // spreads the frequent words over more classes.
                long b_total = 0;
                for (i = 0; i < vocab_size; i++)
                {
                    b_total += vocab[i].cn;
                }

                // dd normalises the sqrt-frequencies so the shares sum to 1.
                for (i = 0; i < vocab_size; i++)
                {
                    dd += Math.Sqrt(vocab[i].cn / (double)b_total);
                }

                for (i = 0; i < vocab_size; i++)
                {
                    df += Math.Sqrt(vocab[i].cn / (double)b_total) / dd;
                    if (df > 1) df = 1;
                    if (df > (a + 1) / (double)class_size)
                    {
                        vocab[i].class_index = a;
                        if (a < class_size - 1) a++;
                    }
                    else
                    {
                        vocab[i].class_index = a;
                    }
                }
            }

            //allocate auxiliary class variables (for faster search when normalizing probability at output layer)

            class_words = new int[class_size][];
            class_cn = new int[class_size];
            class_max_cn = new int[class_size];


            for (i = 0; i < class_size; i++)
            {
                class_cn[i] = 0;
                class_max_cn[i] = 10;
                class_words[i] = new int[class_max_cn[i]];
            }

            // Build the inverse mapping class -> member word ids, growing each
            // class's array by 10 slots whenever it approaches capacity.
            for (i = 0; i < vocab_size; i++)
            {
                cl = vocab[i].class_index;
                class_words[cl][class_cn[cl]] = i;
                class_cn[cl]++;
                if (class_cn[cl] + 2 >= class_max_cn[cl])
                {
                    class_max_cn[cl] += 10;

                    int[] tmp = new int[class_max_cn[cl]];
                    for (int z = 0; z < class_words[cl].Length; z++)
                    {
                        tmp[z] = class_words[cl][z];
                    }
                    class_words[cl] = tmp;
                }
            }
        }

        /// <summary>
        /// Resets all activations and error signals before processing a new
        /// sequence. The recurrent portion at the end of the input layer (the
        /// copy of the previous hidden state) is primed with 0.1 rather than
        /// 0 to prevent instability.
        /// </summary>
        public void netFlush()
        {
            int recurrentStart = layer0_size - layer1_size;

            // neuron is a value type, so Array.Clear wipes both .ac and .er.
            Array.Clear(neu0, 0, layer0_size);
            Array.Clear(neu1, 0, layer1_size);
            Array.Clear(neuc, 0, layerc_size);
            Array.Clear(neu2, 0, layer2_size);

            // Re-seed the recurrent part of the input layer.
            for (int a = recurrentStart; a < layer0_size; a++)
            {
                neu0[a].ac = 0.1;
            }
        }


        /// <summary>
        /// Dense matrix-vector product over a sub-range of rows and columns,
        /// with the matrix stored row-major (element [b, a] at index
        /// a + b * matrix_width).
        /// type == 0 (forward):  dest[b].ac += sum_a srcvec[a].ac * M[b, a]
        ///     for rows b in [from, to) and columns a in [from2, to2).
        /// type != 0 (backward): dest[a].er += sum_b srcvec[b].er * M[b, a]
        ///     (multiplication by the transpose), then clips dest[a].er to
        ///     +/- gradient_cutoff when clipping is enabled.
        /// The bulk of the work is parallelised in chunks of 8 rows/columns
        /// (manually unrolled); a scalar tail loop handles the remainder.
        /// </summary>
        void matrixXvector(neuron[] dest, neuron[] srcvec, synapse[] srcmatrix, int matrix_width, int from, int to, int from2, int to2, int type)
        {
   //         int a, b;
            //double val1, val2, val3, val4;
            //double val5, val6, val7, val8;

            if (type == 0)
            {		//ac mod
                // Each parallel iteration handles 8 consecutive output rows;
                // partial sums are kept in locals to avoid repeated writes.
                Parallel.For(0, (to - from) / 8, parallelOption, b =>
                //               for (b = 0; b < (to - from) / 8; b++)
                {
                    double val1 = 0;
                    double val2 = 0;
                    double val3 = 0;
                    double val4 = 0;

                    double val5 = 0;
                    double val6 = 0;
                    double val7 = 0;
                    double val8 = 0;

                    int offset_b = b * 8 + from;
                    for (int a = from2; a < to2; a++)
                    {
                        val1 += srcvec[a].ac * srcmatrix[a + (offset_b + 0) * matrix_width].weight;
                        val2 += srcvec[a].ac * srcmatrix[a + (offset_b + 1) * matrix_width].weight;
                        val3 += srcvec[a].ac * srcmatrix[a + (offset_b + 2) * matrix_width].weight;
                        val4 += srcvec[a].ac * srcmatrix[a + (offset_b + 3) * matrix_width].weight;

                        val5 += srcvec[a].ac * srcmatrix[a + (offset_b + 4) * matrix_width].weight;
                        val6 += srcvec[a].ac * srcmatrix[a + (offset_b + 5) * matrix_width].weight;
                        val7 += srcvec[a].ac * srcmatrix[a + (offset_b + 6) * matrix_width].weight;
                        val8 += srcvec[a].ac * srcmatrix[a + (offset_b + 7) * matrix_width].weight;
                    }
                    dest[offset_b + 0].ac += val1;
                    dest[offset_b + 1].ac += val2;
                    dest[offset_b + 2].ac += val3;
                    dest[offset_b + 3].ac += val4;

                    dest[offset_b + 4].ac += val5;
                    dest[offset_b + 5].ac += val6;
                    dest[offset_b + 6].ac += val7;
                    dest[offset_b + 7].ac += val8;
                }
                );

                // Scalar tail: rows not covered by the unrolled parallel loop.
                int bb = (to - from) / 8;
                for (bb = bb * 8; bb < to - from; bb++)
                {
                    int offset_b = bb + from;
                    for (int a = from2; a < to2; a++)
                    {
                        dest[offset_b].ac += srcvec[a].ac * srcmatrix[a + offset_b * matrix_width].weight;
                    }
                }
            }
            else
            {		//er mod
                // Backward pass: same unroll-by-8 scheme over columns.
                Parallel.For(0, (to2 - from2) / 8, parallelOption, a =>
                //               for (a = 0; a < (to2 - from2) / 8; a++)
                {
                    double val1 = 0;
                    double val2 = 0;
                    double val3 = 0;
                    double val4 = 0;

                    double val5 = 0;
                    double val6 = 0;
                    double val7 = 0;
                    double val8 = 0;

                    int offset_a = a * 8 + from2;
                    for (int b = from; b < to; b++)
                    {
                        val1 += srcvec[b].er * srcmatrix[offset_a + 0 + b * matrix_width].weight;
                        val2 += srcvec[b].er * srcmatrix[offset_a + 1 + b * matrix_width].weight;
                        val3 += srcvec[b].er * srcmatrix[offset_a + 2 + b * matrix_width].weight;
                        val4 += srcvec[b].er * srcmatrix[offset_a + 3 + b * matrix_width].weight;

                        val5 += srcvec[b].er * srcmatrix[offset_a + 4 + b * matrix_width].weight;
                        val6 += srcvec[b].er * srcmatrix[offset_a + 5 + b * matrix_width].weight;
                        val7 += srcvec[b].er * srcmatrix[offset_a + 6 + b * matrix_width].weight;
                        val8 += srcvec[b].er * srcmatrix[offset_a + 7 + b * matrix_width].weight;
                    }
                    dest[offset_a + 0].er += val1;
                    dest[offset_a + 1].er += val2;
                    dest[offset_a + 2].er += val3;
                    dest[offset_a + 3].er += val4;

                    dest[offset_a + 4].er += val5;
                    dest[offset_a + 5].er += val6;
                    dest[offset_a + 6].er += val7;
                    dest[offset_a + 7].er += val8;
                }
                );

                // Scalar tail for the remaining columns.
                int aa = (to2 - from2) / 8;
                for (aa = aa * 8; aa < to2 - from2; aa++)
                {
                    int offset_a = aa + from2;
                    for (int b = from; b < to; b++)
                    {
                        dest[offset_a].er += srcvec[b].er * srcmatrix[offset_a + b * matrix_width].weight;
                    }
                }

                // Gradient clipping: bound the propagated errors to keep
                // training numerically stable.
                if (gradient_cutoff > 0)
                {
                    for (int a = from2; a < to2; a++)
                    {
                        if (dest[a].er > gradient_cutoff)
                        {
                            dest[a].er = gradient_cutoff;
                        }

                        if (dest[a].er < -gradient_cutoff)
                        {
                            dest[a].er = -gradient_cutoff;
                        }
                    }
                }
            }

        }


		// 256-entry correction table for FAST_EXP: indexed by 8 bits taken
		// from the integer exponent approximation, each factor multiplies out
		// most of the residual error of the linear bit-trick below.
		static double[] ExpAdjustment = new double[256] {
			1.040389835, 1.039159306, 1.037945888, 1.036749401, 1.035569671, 1.034406528, 1.033259801, 1.032129324,
			1.031014933, 1.029916467, 1.028833767, 1.027766676, 1.02671504, 1.025678708, 1.02465753, 1.023651359,
			1.022660049, 1.021683458, 1.020721446, 1.019773873, 1.018840604, 1.017921503, 1.017016438, 1.016125279,
			1.015247897, 1.014384165, 1.013533958, 1.012697153, 1.011873629, 1.011063266, 1.010265947, 1.009481555,
			1.008709975, 1.007951096, 1.007204805, 1.006470993, 1.005749552, 1.005040376, 1.004343358, 1.003658397,
			1.002985389, 1.002324233, 1.001674831, 1.001037085, 1.000410897, 0.999796173, 0.999192819, 0.998600742,
			0.998019851, 0.997450055, 0.996891266, 0.996343396, 0.995806358, 0.995280068, 0.99476444, 0.994259393,
			0.993764844, 0.993280711, 0.992806917, 0.992343381, 0.991890026, 0.991446776, 0.991013555, 0.990590289,
			0.990176903, 0.989773325, 0.989379484, 0.988995309, 0.988620729, 0.988255677, 0.987900083, 0.987553882,
			0.987217006, 0.98688939, 0.98657097, 0.986261682, 0.985961463, 0.985670251, 0.985387985, 0.985114604,
			0.984850048, 0.984594259, 0.984347178, 0.984108748, 0.983878911, 0.983657613, 0.983444797, 0.983240409,
			0.983044394, 0.982856701, 0.982677276, 0.982506066, 0.982343022, 0.982188091, 0.982041225, 0.981902373,
			0.981771487, 0.981648519, 0.981533421, 0.981426146, 0.981326648, 0.98123488, 0.981150798, 0.981074356,
			0.981005511, 0.980944219, 0.980890437, 0.980844122, 0.980805232, 0.980773726, 0.980749562, 0.9807327,
			0.9807231, 0.980720722, 0.980725528, 0.980737478, 0.980756534, 0.98078266, 0.980815817, 0.980855968,
			0.980903079, 0.980955475, 0.981017942, 0.981085714, 0.981160303, 0.981241675, 0.981329796, 0.981424634,
			0.981526154, 0.981634325, 0.981749114, 0.981870489, 0.981998419, 0.982132873, 0.98227382, 0.982421229,
			0.982575072, 0.982735318, 0.982901937, 0.983074902, 0.983254183, 0.983439752, 0.983631582, 0.983829644,
			0.984033912, 0.984244358, 0.984460956, 0.984683681, 0.984912505, 0.985147403, 0.985388349, 0.98563532,
			0.98588829, 0.986147234, 0.986412128, 0.986682949, 0.986959673, 0.987242277, 0.987530737, 0.987825031,
			0.988125136, 0.98843103, 0.988742691, 0.989060098, 0.989383229, 0.989712063, 0.990046579, 0.990386756,
			0.990732574, 0.991084012, 0.991441052, 0.991803672, 0.992171854, 0.992545578, 0.992924825, 0.993309578,
			0.993699816, 0.994095522, 0.994496677, 0.994903265, 0.995315266, 0.995732665, 0.996155442, 0.996583582,
			0.997017068, 0.997455883, 0.99790001, 0.998349434, 0.998804138, 0.999264107, 0.999729325, 1.000199776,
			1.000675446, 1.001156319, 1.001642381, 1.002133617, 1.002630011, 1.003131551, 1.003638222, 1.00415001,
			1.004666901, 1.005188881, 1.005715938, 1.006248058, 1.006785227, 1.007327434, 1.007874665, 1.008426907,
			1.008984149, 1.009546377, 1.010113581, 1.010685747, 1.011262865, 1.011844922, 1.012431907, 1.013023808,
			1.013620615, 1.014222317, 1.014828902, 1.01544036, 1.016056681, 1.016677853, 1.017303866, 1.017934711,
			1.018570378, 1.019210855, 1.019856135, 1.020506206, 1.02116106, 1.021820687, 1.022485078, 1.023154224,
			1.023828116, 1.024506745, 1.025190103, 1.02587818, 1.026570969, 1.027268461, 1.027970647, 1.02867752,
			1.029389072, 1.030114973, 1.030826088, 1.03155163, 1.032281819, 1.03301665, 1.033756114, 1.034500204,
			1.035248913, 1.036002235, 1.036760162, 1.037522688, 1.038289806, 1.039061509, 1.039837792, 1.040618648
		};

        /// <summary>
        /// Fast approximation of Math.Exp using IEEE-754 bit manipulation:
        /// a linear map writes an approximation of e^x directly into the
        /// exponent/high-mantissa bits of a double (Schraudolph-style trick),
        /// then a table lookup (ExpAdjustment, indexed by 8 bits of the
        /// intermediate value) multiplies out most of the residual error.
        /// Only valid over the limited input range the callers clamp to
        /// (roughly [-50, 50] in this file); not a general Exp replacement.
        /// </summary>
        static double FAST_EXP(double x)
		{
			var tmp = (long)(1512775 * x + 1072632447);
			int index = (int)(tmp >> 12) & 0xFF;
			return BitConverter.Int64BitsToDouble(tmp << 32) * ExpAdjustment[index];
		}

        public void computeNet(int last_word, int word)
        {
            int a, b, c;
            double val;
            double sum;   //sum is used for normalization: it's better to have larger precision as many numbers are summed together here

            if (last_word != -1)
            {
                neu0[last_word].ac = 1;
            }

            //propagate 0->1
            for (a = 0; a < layer1_size; a++)
            {
                neu1[a].ac = 0;
            }
            for (a = 0; a < layerc_size; a++)
            {
                neuc[a].ac = 0;
            }

            matrixXvector(neu1, neu0, syn0, layer0_size, 0, layer1_size, layer0_size - layer1_size, layer0_size, 0);

            for (a = 0; a < layer1_size; a++)
            {
                if (last_word != -1)
                {
                    neu1[a].ac += neu0[last_word].ac * syn0[last_word + a * layer0_size].weight;
                }
            }

            //activate 1      --sigmoid
            for (a = 0; a < layer1_size; a++)
            {
                if (neu1[a].ac > 50) neu1[a].ac = 50;  //for numerical stability
                if (neu1[a].ac < -50) neu1[a].ac = -50;  //for numerical stability
                val = -neu1[a].ac;
                neu1[a].ac = 1 / (1 + FAST_EXP(val));
            }

            if (layerc_size > 0)
            {
                matrixXvector(neuc, neu1, syn1, layer1_size, 0, layerc_size, 0, layer1_size, 0);
                //activate compression      --sigmoid
                for (a = 0; a < layerc_size; a++)
                {
                    if (neuc[a].ac > 50) neuc[a].ac = 50;  //for numerical stability
                    if (neuc[a].ac < -50) neuc[a].ac = -50;  //for numerical stability
                    val = -neuc[a].ac;
                    neuc[a].ac = 1 / (1 + FAST_EXP(val));
                }
            }

            //1->2 class
            for (b = vocab_size; b < layer2_size; b++)
            {
                neu2[b].ac = 0;
            }

            if (layerc_size > 0)
            {
                matrixXvector(neu2, neuc, sync, layerc_size, vocab_size, layer2_size, 0, layerc_size, 0);
            }
            else
            {
                matrixXvector(neu2, neu1, syn1, layer1_size, vocab_size, layer2_size, 0, layer1_size, 0);
            }

            //apply direct connections to classes
            if (direct_size > 0)
            {
                long[] hash = new long[MAX_NGRAM_ORDER];	//this will hold pointers to syn_d that contains hash parameters

                for (a = 0; a < direct_order; a++)
                {
                    hash[a] = 0;
                }

                for (a = 0; a < direct_order; a++)
                {
                    b = 0;
                    if (a > 0)
                    {
                        if (history[a - 1] == -1)
                        {
                            break;	//if OOV was in history, do not use this N-gram feature and higher orders
                        }
                    }
                    hash[a] = PRIMES[0] * PRIMES[1];

                    for (b = 1; b <= a; b++)
                    {
                        hash[a] += PRIMES[(a * PRIMES[b] + b) % PRIMES_SIZE] * (long)(history[b - 1] + 1);	//update hash value based on words from the history
                    }
                    hash[a] = hash[a] % (direct_size / 2);		//make sure that starting hash index is in the first half of syn_d (second part is reserved for history->words features)
                }

                for (a = vocab_size; a < layer2_size; a++)
                {
                    for (b = 0; b < direct_order; b++)
                    {
                        if (hash[b] != 0)
                        {
                            neu2[a].ac += syn_d[hash[b]];		//apply current parameter and move to the next one
                            hash[b]++;
                        }
                        else
                        {
                            break;
                        }
                    }
                }
            }

            //activation 2   --softmax on classes
            sum = 0;
            for (a = vocab_size; a < layer2_size; a++)
            {
                if (neu2[a].ac > 50) neu2[a].ac = 50;  //for numerical stability
                if (neu2[a].ac < -50) neu2[a].ac = -50;  //for numerical stability
                val = FAST_EXP(neu2[a].ac);
                sum += val;
                neu2[a].ac = val;
            }
            for (a = vocab_size; a < layer2_size; a++)
            {
                neu2[a].ac /= sum;         //output layer activations now sum exactly to 1
            }

            if (gen > 0)
            {
                return;	//if we generate words, we don't know what current word is -> only classes are estimated and word is selected in testGen()
            }

            //1->2 word

            int word_class_index = vocab[word].class_index;
            int word_class_cn = class_cn[word_class_index];
            int[] word_class_words = class_words[word_class_index];

            if (word != -1)
            {
                for (c = 0; c < word_class_cn; c++)
                {
                    neu2[word_class_words[c]].ac = 0;
                }
                
                if (layerc_size > 0)
                {
                    matrixXvector(neu2, neuc, sync, layerc_size, word_class_words[0], word_class_words[0] + word_class_cn, 0, layerc_size, 0);
                }
                else
                {
                    matrixXvector(neu2, neu1, syn1, layer1_size, word_class_words[0], word_class_words[0] + word_class_cn, 0, layer1_size, 0);
                }
            }

            //apply direct connections to words
            if (word != -1)
            {
                if (direct_size > 0)
                {
                    long[] hash = new long[MAX_NGRAM_ORDER];

                    for (a = 0; a < direct_order; a++)
                    {
                        hash[a] = 0;
                    }
                    for (a = 0; a < direct_order; a++)
                    {
                        b = 0;
                        if (a > 0)
                        {
                            if (history[a - 1] == -1)
                            {
                                break;
                            }
                        }
                        hash[a] = PRIMES[0] * PRIMES[1] * (long)(word_class_index + 1);

                        for (b = 1; b <= a; b++)
                        {
                            hash[a] += PRIMES[(a * PRIMES[b] + b) % PRIMES_SIZE] * (long)(history[b - 1] + 1);
                        }
                        hash[a] = (hash[a] % (direct_size / 2)) + (direct_size) / 2;
                    }

                    for (c = 0; c < word_class_cn; c++)
                    {
                        a = word_class_words[c];

                        for (b = 0; b < direct_order; b++)
                        {
                            if (hash[b] != 0)
                            {
                                neu2[a].ac += syn_d[hash[b]];
                                hash[b]++;
                                hash[b] = hash[b] % direct_size;
                            }
                            else
                            {
                                break;
                            }
                        }
                    }
                }
            }

            //activation 2   --softmax on words
            sum = 0;
            if (word != -1)
            {
                for (c = 0; c < word_class_cn; c++)
                {
                    a = word_class_words[c];
                    if (neu2[a].ac > 50)
                    {
                        neu2[a].ac = 50;  //for numerical stability
                    }
                    if (neu2[a].ac < -50)
                    {
                        neu2[a].ac = -50;  //for numerical stability
                    }
                    val = FAST_EXP(neu2[a].ac);
                    sum += val;
                    neu2[a].ac = val;
                }
                for (c = 0; c < word_class_cn; c++)
                {
                    neu2[word_class_words[c]].ac /= sum;
                }
            }
        }

        /// <summary>
        /// Learns from one training step: computes the error vectors for the target
        /// word and its class, updates the direct (maxent) connections in syn_d,
        /// the output weights (through the compression layer when layerc_size > 0),
        /// and the recurrent input weights — plain backpropagation when bptt &lt;= 1,
        /// truncated BPTT otherwise.  L2 weight decay (beta2/beta3) is applied only
        /// on every 10th step (counter % 10 == 0).
        /// </summary>
        /// <param name="last_word">Index of the previous word (-1 if unknown/OOV).</param>
        /// <param name="word">Index of the current target word; -1 means there is nothing to learn from.</param>
        /// <param name="counter">Training position counter; gates regularization and BPTT block boundaries.</param>
        public void learnNet(int last_word, int word, int counter)
        {
            int t, step;
            double beta2, beta3;

            // regularization coefficients scaled by the current learning rate
            beta2 = beta * alpha;
            beta3 = beta2 * 1;	//beta3 can be possibly larger than beta2, as that is useful on small datasets (if the final model is to be interpolated wich backoff model) - todo in the future

            if (word == -1)
            {
                return;
            }

            // class of the target word and the list of words sharing that class
            int word_class_index = vocab[word].class_index;
            int word_class_cn = class_cn[word_class_index];
            int[] word_class_words = class_words[word_class_index];

            //compute error vectors
            // softmax cross-entropy gradient over the words of the target class:
            // target gets (1 - p), all others (0 - p)
            for (int c = 0; c < word_class_cn; c++)
            {
                int a = word_class_words[c];
                neu2[a].er = (0 - neu2[a].ac);
            }
            neu2[word].er = (1 - neu2[word].ac);	//word part

            //flush error
            for (int a = 0; a < layer1_size; a++)
            {
                neu1[a].er = 0;
            }

            for (int a = 0; a < layerc_size; a++)
            {
                neuc[a].er = 0;
            }
            // same gradient pattern for the class portion of the output layer
            // (class units live at indices [vocab_size, layer2_size))
            for (int a = vocab_size; a < layer2_size; a++)
            {
                neu2[a].er = (0 - neu2[a].ac);
            }
            neu2[word_class_index + vocab_size].er = (1 - neu2[word_class_index + vocab_size].ac);	//class part

            //
            if (direct_size > 0)
            {	//learn direct connections between words
                if (word != -1)
                {
                    // n-gram feature hashes; hash[a] indexes the (a+1)-gram feature.
                    // Word-conditioned features occupy the upper half of syn_d.
                    long[] hash = new long[MAX_NGRAM_ORDER];

                    for (int a = 0; a < direct_order; a++)
                    {
                        hash[a] = 0;
                    }

                    for (int a = 0; a < direct_order; a++)
                    {
                        if (a > 0)
                        {
                            // history ends here; longer n-grams are unavailable
                            if (history[a - 1] == -1)
                            {
                                break;
                            }
                        }

                        hash[a] = PRIMES[0] * PRIMES[1] * (long)(word_class_index + 1);

                        for (int b = 1; b <= a; b++)
                        {
                            hash[a] += PRIMES[(a * PRIMES[b] + b) % PRIMES_SIZE] * (long)(history[b - 1] + 1);
                        }

                        // map into the upper half of the direct-connection table
                        hash[a] = (hash[a] % (direct_size / 2)) + (direct_size) / 2;
                    }

                    for (int c = 0; c < word_class_cn; c++)
                    {
                        int a = word_class_words[c];
                        for (int b = 0; b < direct_order; b++)
                        {
                            if (hash[b] != 0)
                            {
                                // gradient step plus decay (beta3) on the used feature weight
                                syn_d[hash[b]] += alpha * neu2[a].er - syn_d[hash[b]] * beta3;
                                hash[b]++;
                                hash[b] = hash[b] % direct_size;
                            }
                            else
                            {
                                break;
                            }
                        }
                    }
                }
            }
            //
            //learn direct connections to classes
            if (direct_size > 0)
            {	//learn direct connections between words and classes
                // class-conditioned features occupy the lower half of syn_d
                long[] hash = new long[MAX_NGRAM_ORDER];

                for (int a = 0; a < direct_order; a++)
                {
                    hash[a] = 0;
                }

                for (int a = 0; a < direct_order; a++)
                {
                    if (a > 0)
                    {
                        if (history[a - 1] == -1)
                        {
                            break;
                        }
                    }

                    hash[a] = PRIMES[0] * PRIMES[1];

                    for (int b = 1; b <= a; b++)
                    {
                        hash[a] += PRIMES[(a * PRIMES[b] + b) % PRIMES_SIZE] * (long)(history[b - 1] + 1);
                    }
                    hash[a] = hash[a] % (direct_size / 2);
                }

                for (int a = vocab_size; a < layer2_size; a++)
                {
                    for (int b = 0; b < direct_order; b++)
                    {
                        if (hash[b] != 0)
                        {
                            syn_d[hash[b]] += alpha * neu2[a].er - syn_d[hash[b]] * beta3;
                            // NOTE(review): unlike the word branch above, this increment is
                            // not wrapped with a modulo — matches the upstream algorithm,
                            // but confirm hash[b] cannot run past direct_size.
                            hash[b]++;
                        }
                        else
                        {
                            break;
                        }
                    }
                }
            }
            //

            // --- output-layer weight updates and error propagation back to layer 1 ---
            if (layerc_size > 0)
            {
                // propagate errors 2->c for the words of the target class
                matrixXvector(neuc, neu2, sync, layerc_size, word_class_words[0], word_class_words[0] + word_class_cn, 0, layerc_size, 1);

                t = word_class_words[0] * layerc_size;
                for (int c = 0; c < word_class_cn; c++)
                {
                    int b = word_class_words[c];
                    if ((counter % 10) == 0)	//regularization is done every 10. step
                    {
                        for (int a = 0; a < layerc_size; a++)
                        {
                            sync[a + t].weight += alpha * neu2[b].er * neuc[a].ac - sync[a + t].weight * beta2;
                        }
                    }
                    else
                    {
                        for (int a = 0; a < layerc_size; a++)
                        {
                            sync[a + t].weight += alpha * neu2[b].er * neuc[a].ac;
                        }
                    }
                    t += layerc_size;
                }
                //
                matrixXvector(neuc, neu2, sync, layerc_size, vocab_size, layer2_size, 0, layerc_size, 1);		//propagates errors 2->c for classes

                int cc = vocab_size * layerc_size;
                for (int b = vocab_size; b < layer2_size; b++)
                {
                    if ((counter % 10) == 0)
                    {	//regularization is done every 10. step
                        for (int a = 0; a < layerc_size; a++)
                        {
                            sync[a + cc].weight += alpha * neu2[b].er * neuc[a].ac - sync[a + cc].weight * beta2;	//weight c->2 update
                        }
                    }
                    else
                    {
                        for (int a = 0; a < layerc_size; a++)
                        {
                            sync[a + cc].weight += alpha * neu2[b].er * neuc[a].ac;	//weight c->2 update
                        }
                    }
                    cc += layerc_size;
                }

                // sigmoid derivative: ac * (1 - ac)
                for (int a = 0; a < layerc_size; a++)
                {
                    neuc[a].er = neuc[a].er * neuc[a].ac * (1 - neuc[a].ac);    //error derivation at compression layer
                }

                ////

                matrixXvector(neu1, neuc, syn1, layer1_size, 0, layerc_size, 0, layer1_size, 1);		//propagates errors c->1

                for (int b = 0; b < layerc_size; b++)
                {
                    for (int a = 0; a < layer1_size; a++)
                    {
                        syn1[a + b * layer1_size].weight += alpha * neuc[b].er * neu1[a].ac;	//weight 1->c update
                    }
                }
            }
            else
            {
                // no compression layer: update 1->2 weights directly
                matrixXvector(neu1, neu2, syn1, layer1_size, word_class_words[0], word_class_words[0] + word_class_cn, 0, layer1_size, 1);

                t = word_class_words[0] * layer1_size;
                for (int c = 0; c < word_class_cn; c++)
                {
                    int b = word_class_words[c];
                    if ((counter % 10) == 0)	//regularization is done every 10. step
                    {
                        for (int a = 0; a < layer1_size; a++)
                        {
                            syn1[a + t].weight += alpha * neu2[b].er * neu1[a].ac - syn1[a + t].weight * beta2;
                        }
                    }
                    else
                    {
                        for (int a = 0; a < layer1_size; a++)
                        {
                            syn1[a + t].weight += alpha * neu2[b].er * neu1[a].ac;
                        }
                    }
                    t += layer1_size;
                }

                //
                matrixXvector(neu1, neu2, syn1, layer1_size, vocab_size, layer2_size, 0, layer1_size, 1);		//propagates errors 2->1 for classes

                int cc = vocab_size * layer1_size;
                for (int b = vocab_size; b < layer2_size; b++)
                {
                    if ((counter % 10) == 0)
                    {	//regularization is done every 10. step
                        for (int a = 0; a < layer1_size; a++)
                        {
                            syn1[a + cc].weight += alpha * neu2[b].er * neu1[a].ac - syn1[a + cc].weight * beta2;	//weight 1->2 update
                        }
                    }
                    else
                    {
                        for (int a = 0; a < layer1_size; a++)
                        {
                            syn1[a + cc].weight += alpha * neu2[b].er * neu1[a].ac;	//weight 1->2 update
                        }
                    }
                    cc += layer1_size;
                }
            }

            //

            ///////////////
            // --- recurrent input-weight updates: plain BP or truncated BPTT ---

            if (bptt <= 1)
            {		//bptt==1 -> normal BP
                for (int a = 0; a < layer1_size; a++)
                {
                    neu1[a].er = neu1[a].er * neu1[a].ac * (1 - neu1[a].ac);    //error derivation at layer 1
                }

                //weight update 1->0
                //   a = last_word;
                // only the column of the previous word is touched (1-of-N input)
                if (last_word != -1)
                {
                    if ((counter % 10) == 0)
                    {
                        for (int b = 0; b < layer1_size; b++)
                        {
                            syn0[last_word + b * layer0_size].weight += alpha * neu1[b].er * neu0[last_word].ac - syn0[last_word + b * layer0_size].weight * beta2;
                        }
                    }
                    else
                    {
                        for (int b = 0; b < layer1_size; b++)
                        {
                            syn0[last_word + b * layer0_size].weight += alpha * neu1[b].er * neu0[last_word].ac;
                        }
                    }
                }

                // recurrent part of the input layer: last layer1_size input units
                if ((counter % 10) == 0)
                {
                    for (int b = 0; b < layer1_size; b++)
                    {
                        for (int a = layer0_size - layer1_size; a < layer0_size; a++)
                        {
                            syn0[a + b * layer0_size].weight += alpha * neu1[b].er * neu0[a].ac - syn0[a + b * layer0_size].weight * beta2;
                        }
                    }
                }
                else
                {
                    for (int b = 0; b < layer1_size; b++)
                    {
                        for (int a = layer0_size - layer1_size; a < layer0_size; a++)
                        {
                            syn0[a + b * layer0_size].weight += alpha * neu1[b].er * neu0[a].ac;
                        }
                    }
                }
            }
            else		//BPTT
            {
                // stash the current hidden state/error at slot 0 of the BPTT buffer
                for (int b = 0; b < layer1_size; b++)
                {
                    bptt_hidden[b].ac = neu1[b].ac;
                    bptt_hidden[b].er = neu1[b].er;
                }

                // unfold only at block boundaries or at end-of-sentence (word == 0)
                if (((counter % bptt_block) == 0) || word == 0)
                {
                    for (step = 0; step < bptt + bptt_block - 2; step++)
                    {
                        for (int a = 0; a < layer1_size; a++)
                        {
                            neu1[a].er = neu1[a].er * neu1[a].ac * (1 - neu1[a].ac);    //error derivation at layer 1
                        }

                        //weight update 1->0
                        //    a = bptt_history[step];
                        // accumulate into the temporary bptt_syn0; applied to syn0 below
                        if (bptt_history[step] != -1)
                        {
                            for (int b = 0; b < layer1_size; b++)
                            {
                                bptt_syn0[bptt_history[step] + b * layer0_size].weight += alpha * neu1[b].er;//*neu0[a].ac; --should be always set to 1
                            }
                        }

                        for (int a = layer0_size - layer1_size; a < layer0_size; a++)
                        {
                            neu0[a].er = 0;
                        }

                        matrixXvector(neu0, neu1, syn0, layer0_size, 0, layer1_size, layer0_size - layer1_size, layer0_size, 1);		//propagates errors 1->0

                        for (int b = 0; b < layer1_size; b++)
                        {
                            for (int a = layer0_size - layer1_size; a < layer0_size; a++)
                            {
                                bptt_syn0[a + b * layer0_size].weight += alpha * neu1[b].er * neu0[a].ac;
                            }
                        }

                        for (int a = 0; a < layer1_size; a++)
                        {		
                            //propagate error from time T-n to T-n-1
                            neu1[a].er = neu0[a + layer0_size - layer1_size].er + bptt_hidden[(step + 1) * layer1_size + a].er;
                        }

                        // load the older hidden states for the next unfolding step
                        if (step < bptt + bptt_block - 3)
                        {
                            for (int a = 0; a < layer1_size; a++)
                            {
                                neu1[a].ac = bptt_hidden[(step + 1) * layer1_size + a].ac;
                                neu0[a + layer0_size - layer1_size].ac = bptt_hidden[(step + 2) * layer1_size + a].ac;
                            }
                        }
                    }

                    for (int a = 0; a < (bptt + bptt_block) * layer1_size; a++)
                    {
                        bptt_hidden[a].er = 0;
                    }

                    for (int b = 0; b < layer1_size; b++)
                    {
                        neu1[b].ac = bptt_hidden[b].ac;		//restore hidden layer after bptt
                    }

                    //

                    // fold the accumulated bptt_syn0 deltas into syn0 and clear them
                    for (int b = 0; b < layer1_size; b++)
                    {
                        //copy temporary syn0
                        if ((counter % 10) == 0)
                        {
                            for (int a = layer0_size - layer1_size; a < layer0_size; a++)
                            {
                                syn0[a + b * layer0_size].weight += bptt_syn0[a + b * layer0_size].weight - syn0[a + b * layer0_size].weight * beta2;
                                bptt_syn0[a + b * layer0_size].weight = 0;
                            }
                        }
                        else
                        {
                            for (int a = layer0_size - layer1_size; a < layer0_size; a++)
                            {
                                syn0[a + b * layer0_size].weight += bptt_syn0[a + b * layer0_size].weight;
                                bptt_syn0[a + b * layer0_size].weight = 0;
                            }
                        }

                        if ((counter % 10) == 0)
                        {
                            for (step = 0; step < bptt + bptt_block - 2; step++)
                            {
                                if (bptt_history[step] != -1)
                                {
                                    syn0[bptt_history[step] + b * layer0_size].weight += bptt_syn0[bptt_history[step] + b * layer0_size].weight - syn0[bptt_history[step] + b * layer0_size].weight * beta2;
                                    bptt_syn0[bptt_history[step] + b * layer0_size].weight = 0;
                                }
                            }
                        }
                        else
                        {
                            for (step = 0; step < bptt + bptt_block - 2; step++)
                            {
                                if (bptt_history[step] != -1)
                                {
                                    syn0[bptt_history[step] + b * layer0_size].weight += bptt_syn0[bptt_history[step] + b * layer0_size].weight;
                                    bptt_syn0[bptt_history[step] + b * layer0_size].weight = 0;
                                }
                            }
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Copies the current hidden-layer activations into the recurrent part of
        /// the input layer (the last layer1_size units of layer 0), so the next
        /// forward pass can see the previous hidden state.
        /// </summary>
        public void copyHiddenLayerToInput()
        {
            int offset = layer0_size - layer1_size;   // first recurrent input unit
            for (int i = 0; i < layer1_size; i++)
            {
                neu0[offset + i].ac = neu1[i].ac;
            }
        }



        /// <summary>
        /// Resets the network state: re-activates the hidden layer, mirrors it into
        /// the recurrent input units, clears the BPTT history/hidden buffers and the
        /// n-gram history used by the direct connections.
        /// </summary>
        public void netReset()   //cleans hidden layer activation + bptt history
        {
            // hidden layer starts fully activated
            for (int i = 0; i < layer1_size; i++)
            {
                neu1[i].ac = 1.0;
            }

            copyHiddenLayerToInput();

            if (bptt > 0)
            {
                // forget the word history used by truncated BPTT
                // (index 0 is intentionally left untouched, as in the original reset range)
                for (int i = 1; i < bptt + bptt_block; i++)
                {
                    bptt_history[i] = -1;
                }

                // zero the stored hidden-layer copies for steps 2 .. bptt+bptt_block-1
                for (int step = 2; step <= bptt + bptt_block - 1; step++)
                {
                    int baseIdx = step * layer1_size;
                    for (int j = 0; j < layer1_size; j++)
                    {
                        bptt_hidden[baseIdx + j].ac = 0;
                        bptt_hidden[baseIdx + j].er = 0;
                    }
                }
            }

            // clear the n-gram history used by the direct (maxent) connections
            for (int i = 0; i < MAX_NGRAM_ORDER; i++)
            {
                history[i] = 0;
            }
        }

        /// <summary>
        /// Restores all unit activations and weights from the backup copies
        /// (the counterpart of saveWeights).  neuron and synapse are plain value
        /// structs, so whole-struct assignment copies exactly the ac/er
        /// (respectively weight) fields.
        /// </summary>
        public void restoreWeights()      //restores current weights and unit activations from backup copy
        {
            for (int i = 0; i < layer0_size; i++)
            {
                neu0[i] = neu0b[i];
            }

            for (int i = 0; i < layer1_size; i++)
            {
                neu1[i] = neu1b[i];
            }

            for (int i = 0; i < layerc_size; i++)
            {
                neuc[i] = neucb[i];
            }

            for (int i = 0; i < layer2_size; i++)
            {
                neu2[i] = neu2b[i];
            }

            // input -> hidden weights (row-major by hidden unit)
            for (int row = 0; row < layer1_size; row++)
            {
                int rowBase = row * layer0_size;
                for (int col = 0; col < layer0_size; col++)
                {
                    syn0[rowBase + col] = syn0b[rowBase + col];
                }
            }

            if (layerc_size > 0)
            {
                // hidden -> compression weights
                for (int row = 0; row < layerc_size; row++)
                {
                    int rowBase = row * layer1_size;
                    for (int col = 0; col < layer1_size; col++)
                    {
                        syn1[rowBase + col] = syn1b[rowBase + col];
                    }
                }

                // compression -> output weights
                for (int row = 0; row < layer2_size; row++)
                {
                    int rowBase = row * layerc_size;
                    for (int col = 0; col < layerc_size; col++)
                    {
                        sync[rowBase + col] = syncb[rowBase + col];
                    }
                }
            }
            else
            {
                // hidden -> output weights (no compression layer)
                for (int row = 0; row < layer2_size; row++)
                {
                    int rowBase = row * layer1_size;
                    for (int col = 0; col < layer1_size; col++)
                    {
                        syn1[rowBase + col] = syn1b[rowBase + col];
                    }
                }
            }
        }

        /// <summary>
        /// Reads the whole network structure from a UTF-8 text model file: the
        /// fixed-order header, the vocabulary, hidden-layer activations, all layer
        /// weights (with or without the compression layer) and the direct (maxent)
        /// connections.  Allocates the network via initNet() on first use and
        /// finishes by backing up the restored weights with saveWeights().
        /// </summary>
        /// <param name="strModelFile">Path to the model file to load.</param>
        public void restoreNet(string strModelFile)    //will read whole network structure
        {
            // 'using' guarantees the reader is closed even when parsing throws;
            // the original code only called fi.Close() on the success path and
            // leaked the StreamReader on any exception.
            using (StreamReader fi = new StreamReader(strModelFile, Encoding.UTF8))
            {
                int a, b;
                double d;
                string strLine = null;

                // Fixed-order header of "label: value" lines.
                llogp = double.Parse(ReadValueAfterColon(fi));
                train_cur_pos = int.Parse(ReadValueAfterColon(fi));
                logp = double.Parse(ReadValueAfterColon(fi));
                save_step = int.Parse(ReadValueAfterColon(fi));
                layer0_size = int.Parse(ReadValueAfterColon(fi));
                layer1_size = int.Parse(ReadValueAfterColon(fi));
                layerc_size = int.Parse(ReadValueAfterColon(fi));
                layer2_size = int.Parse(ReadValueAfterColon(fi));
                direct_size = long.Parse(ReadValueAfterColon(fi));
                direct_order = int.Parse(ReadValueAfterColon(fi));
                bptt = int.Parse(ReadValueAfterColon(fi));
                bptt_block = int.Parse(ReadValueAfterColon(fi));
                vocab_size = int.Parse(ReadValueAfterColon(fi));
                class_size = int.Parse(ReadValueAfterColon(fi));
                old_classes = int.Parse(ReadValueAfterColon(fi));
                starting_alpha = double.Parse(ReadValueAfterColon(fi));

                // alpha is taken from the model only when it was not set externally
                // (alpha_set == 0); the line must be consumed either way.
                if (alpha_set == 0)
                {
                    alpha = double.Parse(ReadValueAfterColon(fi));
                }
                else
                {
                    d = double.Parse(ReadValueAfterColon(fi));
                }

                alpha_divide = int.Parse(ReadValueAfterColon(fi));

                // Skip the empty line(s) that precede the vocabulary section.
                while (true)
                {
                    strLine = fi.ReadLine().Trim();
                    if (strLine.Length > 0)
                    {
                        break;
                    }
                }

                // Vocabulary: one word per line as "index \t count \t word \t class".
                vocab = new List<vocab_word>();
                vocab_hash = new Dictionary<string, int>();
                for (a = 0; a < vocab_size; a++)
                {
                    strLine = fi.ReadLine();
                    string[] items = strLine.Split('\t');
                    vocab_word vw = new vocab_word(items[2]);

                    vocab_hash.Add(items[2], int.Parse(items[0]));

                    vw.cn = int.Parse(items[1]);
                    vw.word = items[2];
                    vw.class_index = int.Parse(items[3]);

                    vocab.Add(vw);
                }

                if (neu0 == null)
                {
                    initNet();		//memory allocation here
                }

                // Hidden-layer activations (preceded by an empty line and a header line).
                fi.ReadLine();
                strLine = fi.ReadLine();
                for (a = 0; a < layer1_size; a++)
                {
                    neu1[a].ac = double.Parse(fi.ReadLine());
                }

                // Weights 0->1, one value per line, row-major by hidden unit.
                fi.ReadLine();
                strLine = fi.ReadLine();
                for (b = 0; b < layer1_size; b++)
                {
                    for (a = 0; a < layer0_size; a++)
                    {
                        d = double.Parse(fi.ReadLine());
                        syn0[a + b * layer0_size].weight = d;
                    }
                }

                fi.ReadLine();
                strLine = fi.ReadLine();
                if (layerc_size == 0)
                {	//no compress layer: weights 1->2
                    for (b = 0; b < layer2_size; b++)
                    {
                        for (a = 0; a < layer1_size; a++)
                        {
                            d = double.Parse(fi.ReadLine());
                            syn1[a + b * layer1_size].weight = d;
                        }
                    }
                }
                else
                {	//with compress layer: weights 1->c, then c->2
                    for (b = 0; b < layerc_size; b++)
                    {
                        for (a = 0; a < layer1_size; a++)
                        {
                            d = double.Parse(fi.ReadLine());
                            syn1[a + b * layer1_size].weight = d;
                        }
                    }

                    //Empty line
                    fi.ReadLine();
                    for (b = 0; b < layer2_size; b++)
                    {
                        for (a = 0; a < layerc_size; a++)
                        {
                            d = double.Parse(fi.ReadLine());
                            sync[a + b * layerc_size].weight = d;
                        }
                    }
                }

                fi.ReadLine();
                strLine = fi.ReadLine();

                // Direct (maxent) connections, one value per line.
                long aa;
                for (aa = 0; aa < direct_size; aa++)
                {
                    d = double.Parse(fi.ReadLine());
                    syn_d[aa] = d;
                }

                saveWeights();
            }
        }

        /// <summary>
        /// Reads the next line from the model file and returns the trimmed text
        /// after the first ':'.  When the line contains no ':', IndexOf yields -1
        /// and the whole trimmed line is returned — matching the behavior of the
        /// original inline parsing.
        /// </summary>
        /// <param name="fi">Open reader positioned at a "label: value" line.</param>
        private static string ReadValueAfterColon(StreamReader fi)
        {
            string strLine = fi.ReadLine();
            int pos = strLine.IndexOf(":");
            return strLine.Substring(pos + 1).Trim();
        }
    }
}
