﻿using System;
using System.IO;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using NIP.Speech;
using System.Diagnostics;
using SpeechLib;
using System.Collections;

namespace NIP.Speech.Reco
{

    /// <summary>
    ///     Wraps a SAPI in-process speech recognition context. Loads a CFG
    ///     grammar from an XML file, starts/stops recognition through
    ///     <see cref="SpeechEnabled"/>, activates/deactivates per-state
    ///     top-level rules, and raises <see cref="CommandRecognized_Dictionary"/>
    ///     when a phrase from this grammar is recognized.
    /// </summary>
    public class Reco
    {
        // NOTE(review): these two constants are not referenced anywhere in
        // this file — candidates for removal if no external use exists.
        private const String DefaultAction = "GET";
        private const String DefaultTarget = "ME";

        // Arbitrary id assigned to our grammar; recognition results are
        // filtered against it so phrases from other grammars are ignored.
        private const int grammarId = 10;
        private bool speechEnabled;
        private bool speechInitialized;
        private SpeechLib.SpInProcRecoContext objRecoContext;
        private SpeechLib.ISpeechRecoGrammar grammar;
        // Relative path — resolved against the current working directory.
        // NOTE(review): confirm the grammar file is deployed alongside the exe.
        private string grammarFileName = "Grammar\\StateGrammar_ByName.xml";

        #region Events
        /// <summary>Raised with parsed action/object/target information.</summary>
        public event EventHandler<EventArgs.CommandRecognizedEventArgs> CommandRecognized;
        /// <summary>Raised with the recognized property name/value pairs.</summary>
        public event EventHandler<EventArgs.CommandRecognized_Dictionary_EventArgs> CommandRecognized_Dictionary;
        #endregion Events

        /// <summary>
        ///     Initializes the SAPI objects and immediately enables speech
        ///     recognition.
        /// </summary>
        public Reco()
        {
            if (!this.speechInitialized)
            {
                this.InitializeSpeech();
            }
            if (!this.speechEnabled)
            {
                this.SpeechEnabled = true;
            }
        }

        /// <summary>
        ///     Property SpeechEnabled is read/write-able. When it's set to
        ///     true, speech recognition will be started. When it's set to
        ///     false, speech recognition will be stopped.
        /// </summary>
        public bool SpeechEnabled
        {
            get
            {
                return speechEnabled;
            }
            set
            {
                if (speechEnabled != value)
                {
                    speechEnabled = value;

                    if (speechEnabled)
                    {
                        EnableSpeech();
                    }
                    else
                    {
                        DisableSpeech();
                    }
                }
            }
        }

        /// <summary>
        /// Activate the top-level grammar rule named after the dialog state
        /// that was just initiated.
        /// </summary>
        /// <param name="sender">The Dialog.IDialogState that started.</param>
        /// <param name="args">Unused.</param>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the rule cannot be activated; the original SAPI
        /// failure is preserved as the inner exception.
        /// </exception>
        public void StateInitiated_Handler(object sender, System.EventArgs args)
        {
            var ruleName = ((Dialog.IDialogState)sender).Type.ToString();
            try
            {
                grammar.CmdSetRuleState(ruleName,
                    SpeechRuleState.SGDSActive);
                grammar.Rules.Commit();
                Debug.WriteLine("Activated Rule " + ruleName);
            }
            catch (Exception ex)
            {
                // Wrap instead of throwing a bare Exception so the root
                // cause is not discarded.
                throw new InvalidOperationException(
                    "Fail in switching Grammar for State: " + ruleName, ex);
            }
        }

        /// <summary>
        /// Deactivate the top-level grammar rule named after the dialog
        /// state that just completed.
        /// </summary>
        /// <param name="sender">The Dialog.IDialogState that completed.</param>
        /// <param name="args">Unused.</param>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the rule cannot be deactivated; the original SAPI
        /// failure is preserved as the inner exception.
        /// </exception>
        public void StateCompleted_Handler(object sender, System.EventArgs args)
        {
            var ruleName = ((Dialog.IDialogState)sender).Type.ToString();
            try
            {
                grammar.CmdSetRuleState(ruleName,
                    SpeechRuleState.SGDSInactive);
                grammar.Rules.Commit();
                Debug.WriteLine("Deactivated Rule " + ruleName);
            }
            catch (Exception ex)
            {
                throw new InvalidOperationException(
                    "Fail in removing Grammar for State: " + ruleName, ex);
            }
        }

        /// <summary>
        ///     Creates the in-proc recognition context, selects the default
        ///     audio-input device token, hooks the Hypothesis and Recognition
        ///     events, and loads the CFG grammar.
        /// </summary>
        private void InitializeSpeech()
        {
            Debug.WriteLine("Initializing SAPI objects...");
            try
            {
                // In-proc reco context: this process owns the recognition
                // engine (a shared context is also possible with SAPI).
                objRecoContext = new SpeechLib.SpInProcRecoContext();

                // Resolve the default audio-input device token from the
                // registry category and attach it to the recognizer.
                SpeechLib.SpObjectTokenCategory objAudioTokenCategory = new SpeechLib.SpObjectTokenCategory();
                objAudioTokenCategory.SetId(SpeechLibSpeechStringConstants.SpeechCategoryAudioIn, false);

                SpeechLib.SpObjectToken objAudioToken = new SpeechLib.SpObjectToken();
                objAudioToken.SetId(objAudioTokenCategory.Default, SpeechLibSpeechStringConstants.SpeechCategoryAudioIn, false);

                objRecoContext.Recognizer.AudioInput = objAudioToken;

                // Only Hypothesis and Recognition events are of interest here.
                objRecoContext.Hypothesis += new
                    _ISpeechRecoContextEvents_HypothesisEventHandler(
                    RecoContext_Hypothesis);

                objRecoContext.Recognition += new
                    _ISpeechRecoContextEvents_RecognitionEventHandler(
                    RecoContext_Recognition);

                grammar = objRecoContext.CreateGrammar(grammarId);
                LoadCFGGrammar();

                speechInitialized = true;
            }
            catch (Exception e)
            {
                System.Windows.Forms.MessageBox.Show(
                    "Exception caught when initializing SAPI."
                    + " This application may not run correctly.\r\n\r\n"
                    + e.ToString(),
                    "Error");

                throw;
            }
        }

        /// <summary>
        ///     Loads the XML grammar file into the grammar object in dynamic
        ///     mode, so that individual rules can be switched at run time.
        /// </summary>
        private void LoadCFGGrammar()
        {
            Debug.WriteLine("Building grammar from XML...");
            if (grammar == null)
            {
                grammar = objRecoContext.CreateGrammar(grammarId);
            }
            try
            {
                grammar.CmdLoadFromFile(grammarFileName, SpeechLib.SpeechLoadOption.SLODynamic);
            }
            catch (Exception e)
            {
                System.Windows.Forms.MessageBox.Show(
                    "Exp when loading Grammar"
                    + e.ToString(),
                    "Error");
                throw;
            }
        }

        /// <summary>
        ///     This is a private function that stops speech recognition.
        /// </summary>
        /// <returns>Always true.</returns>
        private bool DisableSpeech()
        {
            Debug.Assert(speechInitialized,
                          "speech must be initialized in DisableSpeech");

            if (speechInitialized)
            {
                // Putting the recognition context into the disabled state
                // stops speech recognition; re-enabling restarts it.
                objRecoContext.State = SpeechRecoContextState.SRCS_Disabled;
            }

            return true;
        }

        /// <summary>
        ///     EnableSpeech will initialize all speech objects on first use,
        ///     commit the grammar, and start speech recognition.
        /// </summary>
        /// <returns>Always true.</returns>
        /// <remarks>
        ///     This is a private function.
        /// </remarks>
        private bool EnableSpeech()
        {
            Debug.Assert(speechEnabled, "speechEnabled must be true in EnableSpeech");

            if (speechInitialized == false)
            {
                InitializeSpeech();
            }
            else
            {
                // Grammar does not need to be rebuilt, just re-committed.
                grammar.Rules.Commit();
            }

            objRecoContext.State = SpeechRecoContextState.SRCS_Enabled;
            return true;
        }

        /// <summary>
        ///     Event handler for the SpInProcRecoContext Hypothesis event;
        ///     logs the hypothesized phrase text for diagnostics.
        /// </summary>
        /// <param name="StreamNumber">Audio stream number.</param>
        /// <param name="StreamPosition">Position within the audio stream.</param>
        /// <param name="Result">Partial recognition result.</param>
        private void RecoContext_Hypothesis(int StreamNumber,
            object StreamPosition,
            ISpeechRecoResult Result)
        {
            Debug.WriteLine("Hypothesis: " +
                Result.PhraseInfo.GetText(0, -1, true) + ", " +
                StreamNumber + ", " + StreamPosition);
        }

        /// <summary>
        ///     Event handler for the SpInProcRecoContext Recognition event.
        ///     Extracts the recognized property name/value pairs and raises
        ///     <see cref="CommandRecognized_Dictionary"/> when the phrase
        ///     came from this object's grammar.
        /// </summary>
        /// <param name="StreamNumber">Audio stream number.</param>
        /// <param name="StreamPosition">Position within the audio stream.</param>
        /// <param name="RecognitionType">Recognition type flags.</param>
        /// <param name="Result">Final recognition result.</param>
        private void RecoContext_Recognition(int StreamNumber,
            object StreamPosition,
            SpeechRecognitionType RecognitionType,
            ISpeechRecoResult Result)
        {
            Debug.WriteLine("Recognition: " +
                Result.PhraseInfo.GetText(0, -1, true) + ", " +
                StreamNumber + ", " + StreamPosition);

            var info = new EventArgs.CommandRecognized_Dictionary_EventArgs();
            getCommandInfo(Result.PhraseInfo.Properties, info);

            // Only react to phrases that belong to our own grammar.
            if ((System.Decimal)Result.PhraseInfo.GrammarId == grammarId)
            {
                try
                {
                    // BUG FIX: the original null-checked CommandRecognized but
                    // invoked CommandRecognized_Dictionary, which could throw
                    // NullReferenceException when only the former had
                    // subscribers. Copy to a local for a race-free raise.
                    var handler = CommandRecognized_Dictionary;
                    if (handler != null)
                    {
                        handler(this, info);
                    }
                }
                catch (Exception exc)
                {
                    // Best-effort: a faulty subscriber must not kill the
                    // recognition loop.
                    Debug.WriteLine(exc.Message);
                }
            }
        }

        /// <summary>
        /// A recursive function to extract information from the voice command
        /// received, mapping known semantic property names onto the event args.
        /// </summary>
        /// <param name="aProperties">Phrase Properties from Recognition Result</param>
        /// <param name="info"> The output information </param>
        private void getCommandInfo(ISpeechPhraseProperties aProperties, EventArgs.CommandRecognizedEventArgs info)
        {
            if (aProperties == null) // no children
            {
                return;
            }

            ISpeechPhraseProperty oItem;
            for (int i = 0; i < aProperties.Count; i++)
            {
                oItem = aProperties.Item(i);
                switch (oItem.Name)
                {
                    case "CommandAction":
                        info.Action = oItem.Children.Item(0).Name;
                        break;
                    case "ActionDirection":
                        info.Direction = oItem.Children.Item(0).Name;
                        break;
                    case "TestCommand":
                        info.Action = oItem.Children.Item(0).Name;
                        break;
                    case "CommandObject":
                        info.Object = oItem.Children.Item(0).Name;
                        break;
                    case "CommandTarget":
                        info.Target = oItem.Children.Item(0).Name;
                        break;
                    default:
                        break;
                }

                getCommandInfo(oItem.Children, info);
            }
        }

        /// <summary>
        /// A recursive function that collects every named property/value pair
        /// from the recognition result into the event args dictionary
        /// (both keys and values lower-cased).
        /// </summary>
        /// <param name="aProperties">Phrase Properties from Recognition Result</param>
        /// <param name="info"> The output information </param>
        private void getCommandInfo(ISpeechPhraseProperties aProperties, EventArgs.CommandRecognized_Dictionary_EventArgs info)
        {
            if (aProperties == null) // no children
            {
                return;
            }

            ISpeechPhraseProperty oItem;
            for (int i = 0; i < aProperties.Count; i++)
            {
                oItem = aProperties.Item(i);
                if (oItem.Name != "" && oItem.Children != null)
                {
                    if (oItem.Children.Item(0).Name != "")
                    {
                        // Indexer assignment (last value wins) instead of
                        // Add(), which throws ArgumentException when the same
                        // property name occurs twice in one phrase.
                        info.recognizedText[oItem.Name.ToLower()] = oItem.Children.Item(0).Name.ToLower();
                    }
                }
                getCommandInfo(oItem.Children, info);
            }
        }
    }

    /// <summary>
    /// Workaround for the SpeechLib.SpeechStringConstants embedded-interop
    /// constants: registry category ids used to enumerate audio
    /// input/output device tokens.
    /// </summary>
    internal static class SpeechLibSpeechStringConstants
    {
        public const string SpeechCategoryAudioIn = @"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\AudioInput";
        public const string SpeechCategoryAudioOut = @"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\AudioOutput";
    }
}
