﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
// Grammar and speech things
using System.Speech.AudioFormat;
using System.Speech.Recognition.SrgsGrammar;
using System.Speech.Recognition;

namespace grounding.parser
{
    /// <summary>
    /// Identifies which of the pre-built grammars <c>SpeechToText</c> should load.
    /// </summary>
    public enum GrammarType
    {
        // Free-form English dictation; this is the default grammar.
        Dictation = 0,
        // Restricted grammar that recognizes only the words "yes" and "no".
        YesNo
    }

    /// <summary>
    /// The SpeechToText class is responsible for performing the following tasks:
    /// <list type="bullet">
    /// <item>Alerting other modules of new recognized text (via events)</item>
    /// </list>
    /// </summary>
    public class SpeechToText
    {
        // Grammars, built lazily by the *Builder methods below:
        Grammar yesNoGrammar;
        Grammar dictationGrammar;

        // The grammar currently loaded in the engine. Defaults to dictationGrammar.
        Grammar currentGrammar;

        /// <summary>
        /// Lazily creates a grammar that recognizes only the words "yes" and "no".
        /// </summary>
        /// <returns>The cached yes/no Grammar instance.</returns>
        private Grammar yesNoBuilder()
        {
            if (yesNoGrammar == null)
            {
                yesNoGrammar = new Grammar(new GrammarBuilder(new Choices(new string[] { "yes", "no" })));
                yesNoGrammar.Name = "yesno";
            }
            return yesNoGrammar;
        }

        /// <summary>
        /// Lazily creates a grammar that recognizes completely arbitrary English sentences
        /// </summary>
        /// <returns>The cached dictation Grammar instance.</returns>
        private Grammar dictationBuilder()
        {
            if (this.dictationGrammar == null)
            {
                GrammarBuilder builder = new GrammarBuilder();
                GrammarBuilder dictation = new GrammarBuilder();
                dictation.AppendDictation();

                // Tag the dictation result so consumers can retrieve it by semantic key.
                builder.Append(new SemanticResultKey("DictationInput", dictation));

                this.dictationGrammar = new Grammar(builder);
                this.dictationGrammar.Name = "dictation";
            }

            return this.dictationGrammar;
        }

        private SpeechRecognitionEngine speechEngine;

        #region Public member variables
        /// <summary>
        /// This event fires when text has been recognized by the speech-to-text class
        /// </summary>
        public event EventHandler<SpeechRecognizedEventArgs> SpeechRecognized;

        /// <summary>
        /// If true, display debug text
        /// </summary>
        public bool DebugMode { get; set; }

        private bool isRunning = false;

        /// <summary>
        /// True while asynchronous recognition is active (between Start() and Stop()).
        /// </summary>
        public bool IsRunning { get { return isRunning; } }
        #endregion

        #region Public methods
        /// <summary>
        /// Setup our speech-to-text module, load grammars, etc.
        /// </summary>
        /// <exception cref="InvalidOperationException">
        /// Thrown when no speech recognizer is installed on this machine.
        /// </exception>
        public SpeechToText()
        {
            // Setup member variables
            DebugMode = true;

            // BUGFIX: FirstOrDefault() returns null when no recognizer is installed;
            // fail with a clear message instead of a NullReferenceException on rec.Id.
            RecognizerInfo rec = SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault();
            if (rec == null)
                throw new InvalidOperationException("No speech recognizers are installed on this machine.");
            speechEngine = new SpeechRecognitionEngine(rec.Id);

            // Load the default (dictation) grammar and remember it as the current one.
            // BUGFIX: currentGrammar was previously never initialized, so the first
            // call to LoadGrammar() handed null to UnloadGrammar() and threw.
            currentGrammar = dictationBuilder();
            speechEngine.LoadGrammar(currentGrammar);

            // Attach to events
            speechEngine.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(speechEngine_SpeechDetected);
            speechEngine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(speechEngine_SpeechHypothesized);
            speechEngine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(speechEngine_SpeechRecognitionRejected);
            speechEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(speechEngine_SpeechRecognized);
        }

        /// <summary>
        /// Starts continuous (multiple-utterance) speech recognition on the default audio device.
        /// </summary>
        public void Start()
        {
            speechEngine.SetInputToDefaultAudioDevice();
            speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            isRunning = true;
        }

        /// <summary>
        /// Stops speech recognition
        /// </summary>
        public void Stop()
        {
            speechEngine.RecognizeAsyncStop();
            //speechEngine.SetInputToNull();
            isRunning = false;
        }

        /// <summary>
        /// Swaps the engine's loaded grammar for the supplied one.
        /// </summary>
        /// <param name="g">The grammar to load; must not be null.</param>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="g"/> is null.</exception>
        public void LoadGrammar(Grammar g)
        {
            // Could be changed to private, but it doesn't really matter

            // BUGFIX: guard against null up front — the engine would otherwise throw
            // a far less helpful exception from inside Load/UnloadGrammar.
            if (g == null)
                throw new ArgumentNullException("g");

            if (DebugMode)
            {
                Console.WriteLine("LOADING GRAMMAR");
                Console.WriteLine("\tgrammar name: {0}", g.Name);
            }

            // Ask the engine to pause at a safe point before swapping grammars.
            speechEngine.RequestRecognizerUpdate();
            if (this.currentGrammar != null)
                speechEngine.UnloadGrammar(this.currentGrammar);
            this.currentGrammar = g;
            speechEngine.LoadGrammar(this.currentGrammar);
        }

        /// <summary>
        /// Loads the specified grammar as the "current grammar". Defaults to the dictation grammar
        /// </summary>
        /// <param name="grammarType">Enum value corresponding to the grammar to load.</param>
        public void LoadGrammar(GrammarType grammarType)
        {
            // BUGFIX: use the lazy builders rather than the raw fields — yesNoGrammar
            // is never constructed until yesNoBuilder() runs, so the old code passed
            // null to LoadGrammar(Grammar) for GrammarType.YesNo.
            switch (grammarType)
            {
                case GrammarType.YesNo:
                    LoadGrammar(yesNoBuilder());
                    break;
                case GrammarType.Dictation:
                default:
                    LoadGrammar(dictationBuilder());
                    break;
            }
        }

        #endregion

        #region Callbacks
        // Forwards final engine recognition results to our public SpeechRecognized event.
        private void speechEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (DebugMode)
                Console.WriteLine("Speech recognized: " + e.Result.Text);

            // Copy to a local so a subscriber unsubscribing between the null check
            // and the invocation cannot cause a NullReferenceException.
            EventHandler<SpeechRecognizedEventArgs> handler = SpeechRecognized;
            if (handler != null)
                handler(this, e);
        }

        // Debug-only trace: the engine heard speech but matched no grammar.
        private void speechEngine_SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
        {
            if (DebugMode)
                Console.WriteLine("Speech hypothesis rejected: " + e.Result.Text);
        }

        // Debug-only trace: the engine has a tentative (not yet final) result.
        private void speechEngine_SpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            if (DebugMode)
                Console.WriteLine("Speech hypothesized: " + e.Result.Text);
        }

        // Debug-only trace: audio that may be speech was detected on the input.
        private void speechEngine_SpeechDetected(object sender, SpeechDetectedEventArgs e)
        {
            if (DebugMode)
                Console.WriteLine("Speech detected");
        }
        #endregion
    }
}
