﻿
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Microsoft.Kinect.Interop;
using Microsoft.Kinect;
using Microsoft.Speech.Recognition;
using System.IO;
using System.Threading;
using Microsoft.Speech.AudioFormat;


namespace SharePointExplorer
{
    /// <summary>
    /// Connects a Kinect sensor's microphone array to a Microsoft.Speech
    /// recognizer (loaded with a small "red"/"green"/"blue" color grammar)
    /// and reports recognition events on the console.
    /// </summary>
    public class KinectSpeechControl
    {
        #region "Declarations"

        /// <summary>Sensor whose audio source feeds the recognizer.</summary>
        public KinectSensor kinectSensor { get; set; }

        // Kinect-capable recognizer metadata cached by Initialize()
        // (null when no matching recognizer is installed).
        RecognizerInfo ri;

        // Fields used by the alternative StartAudioStream() path.
        // NOTE(review): neither `source` nor `sre` is assigned anywhere in
        // this file — confirm whether StartAudioStream is dead code.
        KinectAudioSource source;
        Grammar g;
        SpeechRecognitionEngine sre;

        // Text supplied by the caller; currently only stored.
        string TextToRead = string.Empty;

        // Engine driving the main Start() pipeline.
        private SpeechRecognitionEngine speechRecognizer;

        private const int WaveImageWidth = 500;
        private const int WaveImageHeight = 100;

        #endregion

        /// <summary>
        /// Creates the control and immediately starts continuous speech
        /// recognition on <paramref name="sensor"/>. Silently does nothing
        /// when the sensor or a Kinect recognizer is unavailable.
        /// </summary>
        /// <param name="_TextToRead">Text associated with this control; stored for later use.</param>
        /// <param name="sensor">Kinect sensor supplying the audio stream; may be null.</param>
        public KinectSpeechControl(string _TextToRead, KinectSensor sensor)
        {
            TextToRead = _TextToRead;
            kinectSensor = sensor;
            Initialize();
        }

        /// <summary>
        /// Builds the recognition engine and starts streaming sensor audio
        /// into it. Returns without side effects when no sensor or no
        /// Kinect recognizer is present, or when engine creation fails.
        /// </summary>
        private void Initialize()
        {
            if (kinectSensor == null)
                return;

            ri = GetKinectRecognizer();
            if (ri == null)
                return;

            this.speechRecognizer = this.CreateSpeechRecognizer();

            // BUG FIX: the original called Start() unconditionally; when
            // CreateSpeechRecognizer() returned null, Start() threw a
            // NullReferenceException on speechRecognizer.
            if (this.speechRecognizer == null)
                return;

            this.Start();
        }

        /// <summary>
        /// Creates a recognition engine loaded with the color grammar
        /// ("red" / "green" / "blue") and subscribes the recognition events.
        /// </summary>
        /// <returns>
        /// The configured engine, or null when no Kinect recognizer is
        /// installed or the engine cannot be constructed.
        /// </returns>
        private SpeechRecognitionEngine CreateSpeechRecognizer()
        {
            // Reuse the recognizer info cached by Initialize() instead of
            // enumerating the installed recognizers a second time.
            RecognizerInfo info = this.ri ?? GetKinectRecognizer();
            if (info == null)
            {
                return null;
            }

            SpeechRecognitionEngine engine;
            try
            {
                engine = new SpeechRecognitionEngine(info.Id);
            }
            catch (Exception ex)
            {
                // Don't swallow silently: report why the engine failed,
                // then fall back to "no recognition" as before.
                Console.WriteLine("Failed to create speech engine: {0}", ex.Message);
                return null;
            }

            var colors = new Choices();
            colors.Add("red");
            colors.Add("green");
            colors.Add("blue");

            var gb = new GrammarBuilder { Culture = info.Culture };
            gb.Append(colors);

            // Create the actual Grammar instance, and then load it into the speech recognizer.
            engine.LoadGrammar(new Grammar(gb));
            engine.SpeechRecognized += this.SreSpeechRecognized;
            engine.SpeechHypothesized += this.SreSpeechHypothesized;
            engine.SpeechRecognitionRejected += this.SreSpeechRecognitionRejected;

            return engine;
        }

        /// <summary>
        /// Alternative start path that feeds <see cref="source"/> into
        /// <see cref="sre"/>. Guarded because those fields are not assigned
        /// in this file.
        /// </summary>
        private void StartAudioStream()
        {
            if (source == null || sre == null)
                return;

            // BUG FIX: the original wrapped the stream in a using-block,
            // which disposed it as soon as RecognizeAsync returned — while
            // the engine was still reading from it asynchronously. The
            // stream must stay open for the whole recognition session.
            Stream s = source.Start();
            sre.SetInputToAudioStream(
                s, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }

        /// <summary>
        /// Starts the Kinect audio source and begins continuous recognition
        /// on <see cref="speechRecognizer"/>.
        /// </summary>
        private void Start()
        {
            // Guard: CreateSpeechRecognizer() may have returned null and the
            // sensor may have been cleared via the public property.
            if (this.speechRecognizer == null || this.kinectSensor == null)
                return;

            var audioSource = this.kinectSensor.AudioSource;
            var kinectStream = audioSource.Start();

            // 16 kHz, 16-bit, mono PCM — the format the Kinect mic array produces.
            this.speechRecognizer.SetInputToAudioStream(
                kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            this.speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
        }

        /// <summary>Logs speech that the engine rejected outright.</summary>
        private void SreSpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
        {
            Console.WriteLine("\nSpeech Rejected");
        }

        /// <summary>Logs intermediate (hypothesized) recognition text.</summary>
        private void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            Console.Write("\rSpeech Hypothesized: \t{0}", e.Result.Text);
        }

        /// <summary>
        /// Logs recognized speech; results below 0.7 confidence are dumped
        /// to a .wav file for offline inspection.
        /// </summary>
        private void SreSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result.Confidence >= 0.7)
            {
                Console.WriteLine("\nSpeech Recognized: \t{0}\tConfidence:\t{1}", e.Result.Text, e.Result.Confidence);
            }
            else
            {
                Console.WriteLine("\nSpeech Recognized but confidence was too low: \t{0}", e.Result.Confidence);
                DumpRecordedAudio(e.Result.Audio);
            }
        }

        /// <summary>
        /// Writes the retained audio of a recognition result to the first
        /// unused file named "RetainedAudio_N.wav" in the working directory.
        /// </summary>
        /// <param name="audio">Audio captured by the recognizer; ignored when null.</param>
        private static void DumpRecordedAudio(RecognizedAudio audio)
        {
            if (audio == null)
            {
                return;
            }

            int fileId = 0;
            string filename;
            while (File.Exists(filename = "RetainedAudio_" + fileId + ".wav"))
            {
                fileId++;
            }

            Console.WriteLine("\nWriting file: {0}", filename);
            // CreateNew still guards against a race where the file appeared
            // between the Exists() probe and the open.
            using (var file = new FileStream(filename, FileMode.CreateNew))
            {
                audio.WriteToWaveStream(file);
            }
        }

        /// <summary>
        /// Finds the installed recognizer flagged for Kinect use with an
        /// en-US culture, or null when none is installed.
        /// </summary>
        private static RecognizerInfo GetKinectRecognizer()
        {
            Func<RecognizerInfo, bool> matchingFunc = r =>
            {
                string value;
                r.AdditionalInfo.TryGetValue("Kinect", out value);
                // Ordinal comparison: "True" and "en-US" are machine
                // identifiers, not linguistic text (CA1309).
                return "True".Equals(value, StringComparison.OrdinalIgnoreCase)
                    && "en-US".Equals(r.Culture.Name, StringComparison.OrdinalIgnoreCase);
            };
            return SpeechRecognitionEngine.InstalledRecognizers().FirstOrDefault(matchingFunc);
        }
    }
        
}



