﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Microsoft.Research.Kinect.Audio;
using Microsoft.Speech.AudioFormat;
using Microsoft.Speech.Recognition;
using System.IO;
using System.Diagnostics;

namespace NIP.Speech.Reco
{
    /// <summary>
    /// Microsoft Speech Recognition Engine version of Reco. Wires the Kinect
    /// microphone array into a <see cref="SpeechRecognitionEngine"/> loaded with
    /// the NIRA grammar and starts continuous asynchronous recognition.
    /// Dispose the instance to stop recognition and release the audio source.
    /// </summary>
    public class MSRReco : IDisposable
    {
        private const string RecognizerId = "SR_MS_en-US_Kinect_10.0";
        private readonly string grammarFileName = "Grammar\\NIRAGrammar.xml";

        private SpeechRecognitionEngine sre;
        private KinectAudioSource source;

        #region Events
        /// <summary>
        /// Raised when a spoken command is recognized.
        /// NOTE(review): this event is never raised anywhere in this class —
        /// <see cref="SreSpeechRecognized"/> only logs. TODO: raise it there
        /// (requires the handler to become an instance method).
        /// </summary>
        public event EventHandler<EventArgs.CommandRecognizedEventArgs> CommandRecognized;
        #endregion Events

        /// <summary>
        /// Configures the Kinect audio source for speech recognition, loads the
        /// grammar file and starts continuous asynchronous recognition.
        /// If the Kinect speech recognizer is not installed, a message is logged
        /// and the instance is left inert (<c>sre</c> stays null).
        /// </summary>
        public MSRReco()
        {
            source = new KinectAudioSource();

            source.FeatureMode = true;
            source.AutomaticGainControl = false; // Important to turn this off for speech recognition
            source.SystemMode = SystemMode.OptibeamArrayOnly; // No AEC for this sample

            RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers()
                .FirstOrDefault(r => r.Id == RecognizerId);

            if (ri == null)
            {
                Console.WriteLine("Could not find speech recognizer: {0}. Please refer to the sample requirements.", RecognizerId);
                return;
            }

            // BUG FIX: Debug.WriteLine(string, string) resolves to the
            // (message, category) overload, so "{0}" was printed literally and
            // ri.Name became the trace category. Format explicitly instead.
            Debug.WriteLine(string.Format("Using: {0}", ri.Name));

            sre = new SpeechRecognitionEngine(ri.Id);

            // Create the actual Grammar instance, and then load it into the speech recognizer.
            var g = new Grammar(grammarFileName);
            sre.LoadGrammar(g);

            sre.SpeechRecognized += SreSpeechRecognized;
            sre.SpeechHypothesized += SreSpeechHypothesized;
            sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

            Stream s = source.Start();

            // 16 kHz, 16-bit, mono PCM (32000 bytes/sec, 2-byte block align).
            sre.SetInputToAudioStream(s,
                                        new SpeechAudioFormatInfo(
                                            EncodingFormat.Pcm, 16000, 16, 1,
                                            32000, 2, null));

            Debug.WriteLine("Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop");

            sre.RecognizeAsync(RecognizeMode.Multiple);
        }

        /// <summary>
        /// Stops recognition and releases the recognizer and the Kinect audio
        /// source. Safe to call even when construction bailed out early
        /// (missing recognizer leaves <c>sre</c> null) and safe to call twice.
        /// </summary>
        public void Dispose()
        {
            if (sre != null)
            {
                sre.RecognizeAsyncStop();
                sre.SpeechRecognized -= SreSpeechRecognized;
                sre.SpeechHypothesized -= SreSpeechHypothesized;
                sre.SpeechRecognitionRejected -= SreSpeechRecognitionRejected;
                sre.Dispose();
                sre = null;
            }

            if (source != null)
            {
                source.Dispose();
                source = null;
            }
        }

        /// <summary>Logs a rejection and retains its audio for offline diagnosis.</summary>
        static void SreSpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
        {
            Debug.WriteLine("\nSpeech Rejected");
            if (e.Result != null)
                DumpRecordedAudio(e.Result.Audio);
        }

        /// <summary>Logs the current in-progress hypothesis.</summary>
        static void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            // BUG FIX: Debug.Write(string, string) is the (message, category)
            // overload — the hypothesis text was emitted as a category while
            // "{0}" printed literally. Format explicitly instead.
            Debug.Write(string.Format("\rSpeech Hypothesized: \t{0}", e.Result.Text));
        }

        /// <summary>Logs the final recognition result.</summary>
        static void SreSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // This first release of the Kinect language pack doesn't have a reliable
            // confidence model, so we don't use e.Result.Confidence here.
            // BUG FIX: explicit string.Format — see SreSpeechHypothesized.
            Debug.WriteLine(string.Format("\nSpeech Recognized: \t{0}", e.Result.Text));
        }

        /// <summary>
        /// Writes the retained audio of a rejected utterance to the first unused
        /// RetainedAudio_&lt;n&gt;.wav file in the working directory.
        /// </summary>
        private static void DumpRecordedAudio(RecognizedAudio audio)
        {
            if (audio == null) return;

            // Probe for the first filename that does not exist yet.
            int fileId = 0;
            string filename;
            while (File.Exists((filename = "RetainedAudio_" + fileId + ".wav")))
                fileId++;

            Debug.WriteLine(string.Format("\nWriting file: {0}", filename));
            // CreateNew: fail loudly rather than overwrite if the file appears
            // between the existence probe and the open (TOCTOU window).
            using (var file = new FileStream(filename, System.IO.FileMode.CreateNew))
                audio.WriteToWaveStream(file);
        }
    }
}
