﻿using System;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;
using Microsoft.CognitiveServices.Speech.Dialog;
using MvaLib.Diagnostics;
using Windows.Storage;

namespace Mva.Recognition
{
    /// <summary>
    /// <see cref="IDialogBackend"/> implementation that performs on-device keyword spotting
    /// with a local <see cref="KeywordRecognitionModel"/>, feeding externally supplied audio
    /// through a push stream into a <see cref="DialogServiceConnector"/>.
    /// </summary>
    public class LocalKeywordRecognizer2 : IDialogBackend
    {
        private IDialogAudioInputProvider audioSource;
        private DialogServiceConnector connector;
        private PushAudioInputStream inputStream;
        private bool alreadyDisposed = false;
        private KeywordRecognitionModel model = null;

        /// <summary> Raised with partial text while a keyword hypothesis is in progress. </summary>
        public event Action<string> KeywordRecognizing;

        /// <summary>
        /// Raised with the keyword text when the local model matches a keyword.
        /// NOTE(review): this backend raises it from the connector's Recognizing callback
        /// (ResultReason.RecognizingKeyword), i.e. on the earliest local hypothesis rather
        /// than on a confirmed RecognizedKeyword result — confirm this early firing is
        /// intended before treating it as a verified match.
        /// </summary>
        public event Action<string> KeywordRecognized;

        /// <summary> Raised with partial text while speech recognition is in progress. </summary>
        public event Action<string> SpeechRecognizing;

        /// <summary> Raised with the final text of a recognized utterance. </summary>
        public event Action<string> SpeechRecognized;

        /// <summary> Raised when the connector reports a cancellation or error. </summary>
        public event Action<DialogErrorInformation> ErrorReceived;

        public LocalKeywordRecognizer2()
        {
            Debug.WriteLine("Is using LocalKeywordRecognizer2");
        }

        /// <summary>
        /// Sets up the initial state needed for Direct Line Speech, including creation of the
        /// underlying DialogServiceConnector and wiring of its events.
        /// </summary>
        /// <returns> A task that completes once initialization is complete. </returns>
        public async Task InitializeAsync()
        {
            Debug.WriteLine("InitializeAsync");

            var configuration = this.CreateConfiguration();

            // Load the confirmation keyword model from disk for on-device spotting.
            var keywordFile = await KeywordRegistration.Keyword.GetConfirmationKeywordFileAsync();
            Debug.WriteLine($"keywordFile:{keywordFile.Path}");
            this.model = KeywordRecognitionModel.FromFile(keywordFile.Path);

            // Audio is supplied externally (see SetAudioSource) via this push stream.
            this.inputStream = AudioInputStream.CreatePushStream();
            this.connector = new DialogServiceConnector(configuration, AudioConfig.FromStreamInput(this.inputStream));

            this.connector.Recognizing += (s, e) =>
            {
                switch (e.Result.Reason)
                {
                    case ResultReason.RecognizingKeyword:
                        Debug.WriteLine($"Local model recognized keyword \"{e.Result.Text}\"");
                        // Fires KeywordRecognized on the first local hypothesis by design
                        // of this backend; see the note on the KeywordRecognized event.
                        this.KeywordRecognized?.Invoke(e.Result.Text);
                        break;
                    case ResultReason.RecognizingSpeech:
                        this.SpeechRecognizing?.Invoke(e.Result.Text);
                        break;
                    default:
                        throw new InvalidOperationException($"Unexpected Recognizing reason: {e.Result.Reason}");
                }
            };
            this.connector.Recognized += (s, e) =>
            {
                Debug.WriteLine($"Connector recognized: {e.Result.Text}, reason:{e.Result.Reason}");
                switch (e.Result.Reason)
                {
                    case ResultReason.RecognizedKeyword:
                        // Intentionally ignored: KeywordRecognized already fired from the
                        // Recognizing callback above.
                        break;
                    case ResultReason.RecognizedSpeech:
                        this.SpeechRecognized?.Invoke(e.Result.Text);
                        break;
                    case ResultReason.NoMatch:
                        // Intentionally ignored: no event is raised for an unmatched result.
                        break;
                    default:
                        throw new InvalidOperationException($"Unexpected Recognized reason: {e.Result.Reason}");
                }
            };
            this.connector.Canceled += (s, e) =>
            {
                // BUGFIX: the error info was previously computed and then discarded, so the
                // declared ErrorReceived event could never fire and connector errors were
                // silently swallowed. Surface them to subscribers.
                var code = (int)e.ErrorCode;
                var message = $"{e.Reason}: {e.ErrorDetails}";
                this.ErrorReceived?.Invoke(new DialogErrorInformation(code, message));
            };

            Debug.WriteLine("InitializeAsync...end");
        }

        /// <summary>
        /// Sets the audio source to be used by this dialog backend and registers its data
        /// for use.
        /// </summary>
        /// <param name="source"> The agent audio source to use. </param>
        public void SetAudioSource(IDialogAudioInputProvider source)
        {
            Contract.Requires(source != null);

            // NOTE(review): calling this more than once with the same source would stack
            // DataAvailable handlers — confirm callers invoke this only once per source.
            this.audioSource = source;
            this.audioSource.DataAvailable += (bytes) =>
            {
                // Forward captured audio into the connector's push stream, if initialized.
                this.inputStream?.Write(bytes.ToArray());
            };
        }

        /// <summary>
        /// Begins a new turn based on the input audio available from the provider.
        /// </summary>
        /// <param name="performConfirmation"> Whether keyword confirmation should be performed. </param>
        /// <returns> A task that completes immediately and does NOT block on start of turn. </returns>
        public Task StartAudioTurnAsync(bool performConfirmation)
        {
            if (performConfirmation && this.model != null)
            {
                Debug.WriteLine($"LocalKeywordRecognizer2, StartAudioTurnAsync");

                // Fire-and-forget by design: callers must not block on the start of a turn.
                _ = this.connector.StartKeywordRecognitionAsync(this.model);
            }

            // The non-confirmation (listen-once) path is intentionally disabled in this
            // local-keyword-only backend.
            return Task.CompletedTask;
        }

        /// <summary>
        /// No-op: continuous keyword recognition is left running between turns.
        /// </summary>
        public void StopAudioTurn()
        {

        }

        /// <summary>
        /// Basic implementation of IDisposable pattern.
        /// </summary>
        public void Dispose()
        {
            this.Dispose(true);
            GC.SuppressFinalize(this);
        }

        /// <summary>
        /// Basic implementation of IDisposable pattern.
        /// </summary>
        /// <param name="disposing"> Whether managed resource disposal is happening. </param>
        protected virtual void Dispose(bool disposing)
        {
            if (!this.alreadyDisposed)
            {
                if (disposing)
                {
                    this.connector?.Dispose();
                    this.inputStream?.Dispose();
                }

                this.alreadyDisposed = true;
            }
        }

        /// <summary>
        /// Builds the Bot Framework configuration from local settings, including optional
        /// Custom Speech and Custom Voice identifiers.
        /// </summary>
        /// <returns> The configuration to construct the DialogServiceConnector with. </returns>
        private DialogServiceConfig CreateConfiguration()
        {
            var speechKey = LocalSettingsHelper.SpeechSubscriptionKey;
            var speechRegion = LocalSettingsHelper.AzureRegion;
            var customSpeechId = LocalSettingsHelper.CustomSpeechId;
            var customVoiceIds = LocalSettingsHelper.CustomVoiceIds;

            var config = BotFrameworkConfig.FromSubscription(
                speechKey,
                speechRegion);

            // Disable throttling of input audio (send it as fast as we can!)
            // NOTE(review): "Befor" below looks misspelled but may be the exact key the
            // SDK expects — verify against the Speech SDK before renaming.
            config.SetProperty("SPEECH-AudioThrottleAsPercentageOfRealTime", "9999");
            config.SetProperty("SPEECH-TransmitLengthBeforThrottleMs", "10000");

            if (!string.IsNullOrEmpty(customSpeechId))
            {
                config.SetServiceProperty("cid", customSpeechId, ServicePropertyChannel.UriQueryParameter);

                // Custom Speech does not support Keyword Verification - Remove line below when supported.
                config.SetProperty("KeywordConfig_EnableKeywordVerification", "false");
            }

            if (!string.IsNullOrEmpty(customVoiceIds))
            {
                config.SetProperty(PropertyId.Conversation_Custom_Voice_Deployment_Ids, customVoiceIds);
            }

            // SDK file logging can be enabled here if needed, e.g.:
            // config.SetProperty(PropertyId.Speech_LogFilename, $"{ApplicationData.Current.LocalFolder.Path}\\sdklog.txt");

            return config;
        }
    }
}
