﻿namespace VoiceDominion
{
    using System;
    using System.Collections.Generic;
    using System.Configuration;
    using System.Globalization;
    using System.Media;
    using System.Speech.Recognition;
    using System.Speech.Synthesis;

    /// <summary>
    /// Singleton that wires together text-to-speech, speech recognition and the
    /// audio player used to acknowledge recognized voice commands.
    /// </summary>
    public sealed class Speech : IDisposable
    {
        private static volatile Speech instance; // Instance of this class - singleton pattern

        // Dedicated, never-reassigned lock object for double-checked locking.
        // http://msdn.microsoft.com/en-us/library/ff650316.aspx
        private static readonly object syncRoot = new object();

        private bool disposed; // Guards against double disposal; defaults to false.

        /// <summary>
        /// Initializes a new instance of the <see cref="Speech"/> class.
        /// Private so instances can only be obtained through <see cref="Instance"/>.
        /// </summary>
        private Speech()
        {
            //// TTS Config
            this.TextToSpeech = new SpeechSynthesizer();

            // Read the configured voice name once so the error message reports
            // exactly the value we attempted to select.
            var voiceName = ConfigurationManager.AppSettings["TTSVoice"];

            try
            {
                this.TextToSpeech.SelectVoice(voiceName);
            }
            catch (ArgumentException ex)
            {
                // Selection failed (voice not installed); the synthesizer keeps its default voice.
                Console.WriteLine(string.Format(CultureInfo.InvariantCulture, "Could not load the voice named {0} - {1}", voiceName, ex));
            }

            //// Voice Recognition config
            this.SpeechToText = new SpeechRecognitionEngine();
            this.ReloadGrammar();

            this.SpeechToText.SpeechRecognized += this.ReactToSpeechRecognized;
            this.SpeechToText.SetInputToDefaultAudioDevice();

            //// Computer noise 
            this.AudioPlayer = new SoundPlayer();
            this.AudioPlayer.SoundLocation = @"computer_work_beep.wav";
        }

        /// <summary>
        /// Gets the single shared <see cref="Speech"/> instance, created lazily and
        /// thread-safely on first access (double-checked locking).
        /// </summary>
        public static Speech Instance
        {
            get
            {
                if (instance == null)
                {
                    lock (syncRoot)
                    {
                        if (instance == null)
                        {
                            instance = new Speech();
                        }
                    }
                }

                return instance;
            }
        }

        /// <summary>Gets or sets the text to speech (speech synthesis) instance.</summary>
        public SpeechSynthesizer TextToSpeech { get; set; }

        /// <summary>Gets or sets the speech to text (speech recognition) instance.</summary>
        public SpeechRecognitionEngine SpeechToText { get; set; }

        /// <summary>Gets or sets the audio player used to play a .wav file over the speakers when a command is recognized.</summary>
        public SoundPlayer AudioPlayer { get; set; }

        /// <summary>
        /// Starts continuous asynchronous recognition; recognized phrases are
        /// delivered to <see cref="ReactToSpeechRecognized"/> until the engine is stopped or disposed.
        /// </summary>
        public void Run()
        {
            this.SpeechToText.RecognizeAsync(RecognizeMode.Multiple);
        }

        /// <summary>
        /// Replaces all currently loaded grammars with the grammar defined in
        /// SpeechRecognitionGrammar.xml (resolved relative to the working directory).
        /// </summary>
        public void ReloadGrammar()
        {
            var grammar = new Grammar(@"SpeechRecognitionGrammar.xml");

            // NOTE(review): RequestRecognizerUpdate is asynchronous - it asks the engine to
            // pause at a safe point rather than blocking until idle. The unload/load calls
            // below are queued by the engine; confirm this sequencing is sufficient if
            // grammars are ever reloaded while recognition is actively running.
            this.SpeechToText.RequestRecognizerUpdate();
            this.SpeechToText.UnloadAllGrammars();
            this.SpeechToText.RequestRecognizerUpdate();
            this.SpeechToText.LoadGrammar(grammar);
        }

        /// <summary>
        /// Releases the synthesizer, recognizer and audio player held by this instance.
        /// </summary>
        public void Dispose()
        {
            this.Dispose(true);

            // Conventional for the dispose pattern; harmless here since the class is
            // sealed and defines no finalizer.
            GC.SuppressFinalize(this);
        }

        // http://msdn.microsoft.com/en-us/library/fs2xkftw(v=vs.110).aspx
        /// <summary>
        /// Disposes the managed speech and audio resources. Safe to call more than once.
        /// </summary>
        /// <param name="disposing">True when invoked from <see cref="Dispose()"/>; false when invoked from a finalizer.</param>
        public void Dispose(bool disposing)
        {
            // If you need thread safety, use a lock around these  
            // operations, as well as in your methods that use the resource. 
            if (!this.disposed)
            {
                if (disposing)
                {
                    if (this.SpeechToText != null)
                    {
                        // Detach the handler first so no recognition event fires mid-teardown.
                        this.SpeechToText.SpeechRecognized -= this.ReactToSpeechRecognized;
                        this.SpeechToText.Dispose();
                    }

                    if (this.TextToSpeech != null)
                    {
                        this.TextToSpeech.Dispose();
                    }

                    if (this.AudioPlayer != null)
                    {
                        this.AudioPlayer.Dispose();
                    }
                }

                this.TextToSpeech = null;
                this.SpeechToText = null;
                this.AudioPlayer = null;

                this.disposed = true;
            }
        }

        /// <summary>
        /// Handles a recognized phrase: when its confidence exceeds the configured
        /// minimum, collects the semantic tags and forwards them to the module
        /// addressed by the "module" tag (if one is registered).
        /// </summary>
        /// <param name="sender">The recognition engine that raised the event.</param>
        /// <param name="e">The recognition result, including confidence and semantics.</param>
        private void ReactToSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Parse with InvariantCulture so a threshold such as "0.5" reads identically
            // on every machine; the culture-sensitive overload misparses (or throws)
            // under locales that use a comma as the decimal separator.
            var minConfidence = float.Parse(ConfigurationManager.AppSettings["MinSTTConfidence"], CultureInfo.InvariantCulture);

            Console.WriteLine("You said: " + e.Result.Text + " " + e.Result.Confidence);

            if (e.Result.Confidence > minConfidence)
            {
                Console.WriteLine(string.Format(CultureInfo.InvariantCulture, "Confidence > {0}", minConfidence));

                var tags = new Dictionary<string, string>();

                foreach (var semantic in e.Result.Semantics)
                {
                    Console.WriteLine(string.Format(CultureInfo.InvariantCulture, "tag {0} = {1}", semantic.Key, semantic.Value.Value.ToString()));
                    tags[semantic.Key] = semantic.Value.Value.ToString();
                }

                // Single lookup into tags instead of repeated indexing.
                string moduleName;
                if (tags.TryGetValue("module", out moduleName) && VoiceDominionApp.Modules.ContainsKey(moduleName))
                {
                    Console.WriteLine(string.Format(CultureInfo.InvariantCulture, "Sending command to the {0} module.", moduleName));
                    VoiceDominionApp.Modules[moduleName].ProcessCommand(tags);
                }
            }
        }
    }
}
