﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace LMDecoder
{
    public class RNNDecoder : RnnLM
    {
        /// <summary>
        /// Loads a trained RNN language model from disk and prepares the
        /// recurrent network for sentence scoring.
        /// </summary>
        /// <param name="strModelFile">Path to the serialized RNN LM model file.</param>
        public void LoadLM(string strModelFile)
        {
            restoreNet(strModelFile);     // restore weights/vocabulary into base-class state
            copyHiddenLayerToInput();     // seed the recurrent input from the hidden layer
            netReset();                   // clear hidden state so scoring starts from a clean context
        }

        /// <summary>
        /// Scores a single sentence with the loaded RNN LM and returns its
        /// accumulated log-probability, perplexity, and count of unscored tokens.
        /// An end-of-sentence marker "&lt;/s&gt;" is appended before scoring.
        /// May also adapt the network online (when the base-class <c>dynamic</c>
        /// learning rate is positive), so scoring is not side-effect free.
        /// </summary>
        /// <param name="strLine">Whitespace-separated sentence to score.</param>
        /// <returns>
        /// A <see cref="RnnLMResult"/> carrying <c>logProb</c> (base-10 log
        /// probability summed over scored words), <c>perplexity</c>, and
        /// <c>oovs</c> (tokens that were not scored).
        /// </returns>
        public RnnLMResult GetSentProb(string strLine)
        {
            // Append the sentence-end token so the model also scores sentence end.
            strLine = strLine + " </s>";
            // NOTE(review): Split() keeps empty entries, so consecutive whitespace
            // yields zero-length tokens; they are skipped below but still count
            // toward tokens.Length and therefore inflate the reported oovs.
            string[] tokens = strLine.Split();


            int a, b, word, last_word, wordcn;
            double prob_other, log_other, log_combine;

            last_word = 0;					//last word = end of sentence (vocabulary index 0)
            logp = 0;                       // accumulated base-10 log probability (base-class field)
            log_other = 0;                  // log prob of an external model — degenerate here, see prob_other
            log_combine = 0;                // interpolated score — only meaningful if prob_other were supplied
            prob_other = 0;                 // NOTE(review): never assigned in this method, so the
                                            // "combine"/"other" accumulators end up as Log10(0); only
                                            // logp feeds the returned result
            wordcn = 0;                     // number of words actually scored


            foreach (string token in tokens)
            {
                if (token.Length == 0)
                {
                    continue;   // skip empty tokens produced by consecutive whitespace
                }

                //Get word index from vocab; -1 marks an out-of-vocabulary word
                if (vocab_hash.ContainsKey(token) == true)
                {
                    word = vocab_hash[token];
                }
                else
                {
                    word = -1;
                }

                computeNet(last_word, word);		//forward pass: compute probability distribution for this position

                // Score the word unless it is OOV and no fallback probability exists.
                if ((word != -1) || (prob_other > 0))
                {
                    if (word == -1)
                    {
                        logp += -8;		//some ad hoc penalty - when mixing different vocabularies, single model score is not real PPL
                        log_combine += Math.Log10(0 * lambda + prob_other * (1 - lambda));
                    }
                    else
                    {
                        // Presumably a class-factored output layer:
                        // P(w) = P(class(w)) * P(w | class), with per-word outputs in
                        // neu2[0..vocab_size) and class outputs starting at
                        // neu2[vocab_size] — TODO confirm against RnnLM.
                        logp += Math.Log10(neu2[vocab[word].class_index + vocab_size].ac * neu2[word].ac);
                        log_combine += Math.Log10(neu2[vocab[word].class_index + vocab_size].ac * neu2[word].ac * lambda + prob_other * (1 - lambda));
                    }
                    log_other += Math.Log10(prob_other);
                    wordcn++;
                }

                // Optional dynamic (online) adaptation: update weights while decoding.
                if (dynamic > 0)
                {
                    if (bptt > 0)
                    {
                        // Shift the BPTT word history one step back in time.
                        for (a = bptt + bptt_block - 1; a > 0; a--)
                        {
                            bptt_history[a] = bptt_history[a - 1];
                        }

                        bptt_history[0] = last_word;

                        // Shift the stored hidden-layer snapshots (activation and
                        // error) the same way; bptt_hidden is a flattened
                        // [step][neuron] array of stride layer1_size.
                        for (a = bptt + bptt_block - 1; a > 0; a--)
                        {
                            for (b = 0; b < layer1_size; b++)
                            {
                                bptt_hidden[a * layer1_size + b].ac = bptt_hidden[(a - 1) * layer1_size + b].ac;
                                bptt_hidden[a * layer1_size + b].er = bptt_hidden[(a - 1) * layer1_size + b].er;
                            }
                        }
                    }

                    alpha = dynamic;
                    learnNet(last_word, word, 0);    //dynamic update
                }
                copyHiddenLayerToInput();   // feed the hidden state back as the next step's recurrent input

                if (last_word != -1)
                {
                    neu0[last_word].ac = 0;  //delete previous activation
                }

                last_word = word;

                // Shift the n-gram history window and push the current word in front.
                for (a = MAX_NGRAM_ORDER - 1; a > 0; a--)
                {
                    history[a] = history[a - 1];
                }
                history[0] = last_word;

                // Index 0 is the end-of-sentence token: reset the recurrent state
                // so a following sentence starts from a clean context.
                if (word == 0)
                {
                    netReset();
                    last_word = 0;
                }
            }

            // Perplexity: logp is base-10, so PPL = 10^(-logp/wordcn)
            // = exp((-logp/wordcn) * ln(10)); 2.302585093 is ln(10).
            // NOTE(review): wordcn == 0 (e.g. all-OOV input) makes this
            // NaN/Infinity rather than raising — verify callers tolerate that.
            double ppl = Math.Exp((-logp / (double)wordcn) * 2.302585093);

            RnnLMResult LMRst = new RnnLMResult();
            LMRst.perplexity = ppl;
            LMRst.logProb = logp;
            LMRst.oovs = tokens.Length - wordcn;    // unscored tokens: OOVs plus any empty tokens

            return LMRst;
        }
    }
}
