﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Microsoft.Speech.AudioFormat;
using Microsoft.Speech.Recognition;
using System.IO;
using Microsoft.Kinect;
using System.Windows;
using System.Speech;
using System.Diagnostics;
using System.Data.SQLite;

namespace DetectingKinectSencor._02_Oral_Communication
{
    class SpeechRecognitionManager
    {

        // Speech recognition engine; stays null when no suitable recognizer is installed.
        private System.Speech.Recognition.SpeechRecognitionEngine speechEngine;
        private readonly KinectSensor kinectSensor;

        // Measures the elapsed time of the current counting window.
        private readonly Stopwatch counterWordsPerMinute;
        // Number of words recognized within the current counting window.
        private int wordsPerMinute;
        private readonly SQLiteConnection databaseConnection;
        // Prepared, parameterized INSERT command; values are bound per sample.
        private readonly SQLiteCommand sqliteCommandWordsPerMinute;
        // Set by the owner to mark when the overall capture session started.
        public DateTime startedCaptureTime;
        // Wall-clock time at which the current counting window began.
        private DateTime WordsPerMinuteTimePoint;
        // Length of one counting window in seconds.
        private readonly int timeSpanInSeconds = 60;
        // Lexical form of every word recognized so far, for the final summary.
        private List<String> listOfWords;


        /// <summary>
        /// Initializes speech recognition on the Kinect audio stream and prepares the
        /// database command used to persist words-per-minute samples. If no suitable
        /// recognizer is installed, a message box is shown and no recognition starts.
        /// </summary>
        /// <param name="kinectSensor">Kinect sensor supplying the audio source.</param>
        /// <param name="databaseConnectionOralCommunicationManager">Open SQLite connection used for persistence.</param>
        public SpeechRecognitionManager(KinectSensor kinectSensor, SQLiteConnection databaseConnectionOralCommunicationManager)
        {
            this.kinectSensor = kinectSensor;
            counterWordsPerMinute = new Stopwatch();

            this.databaseConnection = databaseConnectionOralCommunicationManager;

            // Prepare a parameterized INSERT once. Parameters avoid SQL injection and
            // the culture-dependent DateTime.ToString() formatting that string
            // concatenation would silently bake into the database.
            sqliteCommandWordsPerMinute = databaseConnection.CreateCommand();
            sqliteCommandWordsPerMinute.CommandText =
                "INSERT INTO oral_communication (capturedTime, timepoint, timespanMinute, timespanSec, wordsPerMinute) " +
                "VALUES (@capturedTime, @timepoint, @timespanMinute, @timespanSec, @wordsPerMinute);";

            // Pick a speech recognizer installed on this machine (German only,
            // see GetKinectRecognizer).
            System.Speech.Recognition.RecognizerInfo ri = GetKinectRecognizer();

            if (ri != null)
            {
                this.speechEngine = new System.Speech.Recognition.SpeechRecognitionEngine(ri.Id);

                listOfWords = new List<String>();

                // Use free dictation (no fixed command set) so arbitrary spoken
                // words are recognized and can be counted.
                System.Speech.Recognition.DictationGrammar customDictationGrammar =
                                                                new System.Speech.Recognition.DictationGrammar();

                customDictationGrammar.Name = "Dictation";
                customDictationGrammar.Enabled = true;

                // Load the grammar into the recognition engine
                this.speechEngine.LoadGrammar(customDictationGrammar);

                // SpeechRecognized fires for each recognized utterance and receives
                // the recognized words via SpeechRecognizedEventArgs.
                speechEngine.SpeechRecognized += SpeechRecognized;

                // Feed the Kinect microphone array into the engine:
                // 16 kHz, 16 bit, mono PCM (the format the Kinect audio source delivers).
                speechEngine.SetInputToAudioStream(this.kinectSensor.AudioSource.Start(),
                                                    new System.Speech.AudioFormat.SpeechAudioFormatInfo(
                                                    System.Speech.AudioFormat.EncodingFormat.Pcm,
                                                    16000, 16, 1, 32000, 2, null));

                // Recognize continuously on a background thread until stopped.
                speechEngine.RecognizeAsync(System.Speech.Recognition.RecognizeMode.Multiple);
            }
            else
            {
                MessageBox.Show("No speech Recognizer");
            }
        }


        /// <summary>
        /// Handler for recognized speech events. Accumulates the recognized words and,
        /// once the counting window has elapsed, writes one words-per-minute sample to
        /// the database and starts a new window.
        /// </summary>
        /// <param name="sender">object sending the event.</param>
        /// <param name="e">event arguments carrying the recognized words.</param>
        private void SpeechRecognized(object sender, System.Speech.Recognition.SpeechRecognizedEventArgs e)
        {
            // Lazily start the window on the first recognized utterance, so the
            // window begins when speech actually occurs rather than at construction.
            if (!counterWordsPerMinute.IsRunning)
            {
                counterWordsPerMinute.Start();
                WordsPerMinuteTimePoint = DateTime.Now;
            }

            wordsPerMinute += e.Result.Words.Count;

            foreach (System.Speech.Recognition.RecognizedWordUnit word in e.Result.Words)
            {
                listOfWords.Add(word.LexicalForm);
            }

            if (counterWordsPerMinute.Elapsed.TotalSeconds >= timeSpanInSeconds)
            {
                // Bind this sample's values to the prepared, parameterized command.
                sqliteCommandWordsPerMinute.Parameters.Clear();
                sqliteCommandWordsPerMinute.Parameters.AddWithValue("@capturedTime", startedCaptureTime);
                sqliteCommandWordsPerMinute.Parameters.AddWithValue("@timepoint", WordsPerMinuteTimePoint);
                sqliteCommandWordsPerMinute.Parameters.AddWithValue("@timespanMinute", 0);
                sqliteCommandWordsPerMinute.Parameters.AddWithValue("@timespanSec", timeSpanInSeconds);
                sqliteCommandWordsPerMinute.Parameters.AddWithValue("@wordsPerMinute", wordsPerMinute);
                sqliteCommandWordsPerMinute.ExecuteNonQuery();

                Console.WriteLine("Words per minute timepoint: " + WordsPerMinuteTimePoint + " number of words: " + wordsPerMinute);

                // Reset for the next counting window; the stopwatch restarts on the
                // next recognized utterance.
                wordsPerMinute = 0;
                counterWordsPerMinute.Reset();
            }
        }

        /// <summary>
        /// Stops the background recognition and prints every recognized word plus a
        /// total count. Safe to call even when the constructor found no recognizer
        /// (in which case recognition never started and there is nothing to stop).
        /// </summary>
        public void stopRecognition()
        {
            if (speechEngine == null)
            {
                // Constructor fell into the "no recognizer" branch; nothing was started.
                return;
            }

            speechEngine.RecognizeAsyncStop();
            // Unsubscribe so the engine no longer keeps this instance reachable.
            speechEngine.SpeechRecognized -= SpeechRecognized;

            foreach (String word in listOfWords)
            {
                System.Console.WriteLine("words: " + word);
            }

            System.Console.WriteLine("Total number of words: " + listOfWords.Count);
        }


        /// <summary>
        /// Gets the metadata for the speech recognizer (acoustic model) most suitable to
        /// process audio from Kinect device.
        /// </summary>
        /// <returns>
        /// RecognizerInfo if found, <code>null</code> otherwise.
        /// </returns>
        private static System.Speech.Recognition.RecognizerInfo GetKinectRecognizer()
        {
            foreach (System.Speech.Recognition.RecognizerInfo recognizer in System.Speech.Recognition.SpeechRecognitionEngine.InstalledRecognizers())
            {
                // NOTE: checking AdditionalInfo["Kinect"] == "True" is only required when
                // the Microsoft.Speech (Kinect) runtime is used; with System.Speech any
                // installed German recognizer is acceptable.
                if ("de-DE".Equals(recognizer.Culture.Name, StringComparison.OrdinalIgnoreCase))
                {
                    return recognizer;
                }
            }

            return null;
        }
    }
}
