import { create } from 'zustand'

import * as sdk from 'microsoft-cognitiveservices-speech-sdk'

import { visemeMap } from '@/config/speech'
import { Speaker, SpeakerPack } from '@/lib/speech/speaker'

const AZURE_SPEECH_SERVICE_REGION = "westus2"
// SECURITY NOTE(review): this subscription key is committed to source control
// and — since this store runs client-side — ships in every browser bundle.
// It must be treated as leaked: rotate it in the Azure portal and move the
// lookup to a server-side secret / environment variable (e.g. a token
// endpoint issuing short-lived auth tokens via sdk.SpeechConfig.fromAuthorizationToken).
const AZURE_SPEECH_SUBSCRIPTION_KEY = "d0114c07358b4641961af548538e04ba"

/** Lifecycle of the Azure speech synthesizer owned by the store. */
export enum SpeechStatus {
    /** `init()` has not been called yet. */
    Uninitialized = 0,
    /** `init()` is currently building the synthesizer. */
    Preparing = 1,
    /** The synthesizer exists and can accept `speak()` jobs. */
    Initialized = 2
}

// One queued synthesis request: the SSML document to send to Azure plus the
// SpeakerPack that accumulates the resulting audio and word/viseme timing
// until the job is handed to the Speaker for playback.
type SpeechJob = {
    ssml: string,
    speak: SpeakerPack
}

type SpeechState = {
    // Synthesizer lifecycle; see SpeechStatus.
    status: SpeechStatus
    // Lazily creates the Azure synthesizer; only acts while Uninitialized,
    // so repeated calls are harmless.
    init: VoidFunction

    // Azure synthesizer created by init(); null until Initialized.
    synthesizer: sdk.SpeechSynthesizer | null

    // Playback sink that consumes a finished SpeakerPack (audio + lip-sync).
    speaker: Speaker | null
    setSpeaker: (value: Speaker) => void
    // Queues text for synthesis with an optional voice name.
    // NOTE(review): the implementation also treats a runtime null as an
    // end-of-stream marker even though this declares `text: string` —
    // consider widening to `string | null` together with the implementation.
    speak: (text: string, voice?: string) => void

    // FIFO of pending jobs; a null entry is an end-of-stream marker.
    messageQueue: (SpeechJob | null)[]
    // Synthesizes the head of messageQueue; re-invoked after each job.
    processQueue: VoidFunction

    // Azure SDK event callbacks that accumulate viseme / word timing data
    // into the current head job's SpeakerPack.
    handleVisemeReceived: (sender: sdk.SpeechSynthesizer, event: sdk.SpeechSynthesisVisemeEventArgs) => void
    handleWordBoundary: (sender: sdk.SpeechSynthesizer, event: sdk.SpeechSynthesisWordBoundaryEventArgs) => void
}

export const useSpeech = create<SpeechState>((set, get) => ({
    status: SpeechStatus.Uninitialized,

    /**
     * Lazily creates the Azure speech synthesizer. Idempotent: only the
     * first call (while status is Uninitialized) does any work, so it is
     * safe to invoke from multiple components/effects.
     */
    init: async () => {
        if (get().status !== SpeechStatus.Uninitialized) return
        set({ status: SpeechStatus.Preparing })

        const audioConfig = sdk.AudioConfig.fromDefaultSpeakerOutput()
        const speechConfig = sdk.SpeechConfig.fromSubscription(AZURE_SPEECH_SUBSCRIPTION_KEY, AZURE_SPEECH_SERVICE_REGION)
        // Raw PCM (no container) — the Speaker schedules the samples itself.
        speechConfig.speechSynthesisOutputFormat = sdk.SpeechSynthesisOutputFormat.Raw22050Hz16BitMonoPcm

        // Attach event handlers before the first synthesis so no
        // word-boundary / viseme events are dropped.
        const synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig)
        synthesizer.wordBoundary = get().handleWordBoundary
        synthesizer.visemeReceived = get().handleVisemeReceived
        set({ synthesizer, status: SpeechStatus.Initialized })
    },

    speaker: null,
    setSpeaker: (speaker: Speaker) => set({ speaker }),

    /**
     * Queues `text` for synthesis with the given neural voice.
     * Passing null enqueues an end-of-stream marker instead of a job
     * (parameter widened to `string | null` to match that runtime contract;
     * callers passing a plain string are unaffected).
     */
    speak: (text: string | null, voice: string = 'en-US-AndrewNeural') => {
        if (text === null) {
            get().messageQueue.push(null)
        } else {
            // Escape XML-significant characters so user text cannot break
            // out of the SSML document.
            const escaped = text
                .replaceAll('&', '&amp;')
                .replaceAll('<', '&lt;')
                .replaceAll('>', '&gt;')
            // NOTE(review): the default SSML namespace
            // (xmlns='http://www.w3.org/2001/10/synthesis') is omitted here;
            // Azure's SSML docs list it as required — confirm the service
            // accepts this document before changing it.
            const ssml = "<speak version='1.0' " +
                "xmlns:mstts='http://www.w3.org/2001/mstts' xml:lang='en-US'>" +
                "<voice name='" + voice + "'>" +
                "<mstts:viseme type='FacialExpression'/>" +
                escaped +
                "</voice>" +
                "</speak>";

            get().messageQueue.push({
                ssml: ssml,
                speak: {
                    audio: [],
                    words: [],
                    wtimes: [],
                    wdurations: [],
                    visemes: [],
                    animations: [],
                    vtimes: [],
                    vdurations: []
                }
            })
        }
        // Only kick the pump when the queue was previously empty; otherwise
        // processQueue re-invokes itself after each job completes.
        if (get().messageQueue.length === 1) {
            get().processQueue()
        }
    },

    synthesizer: null,

    messageQueue: [],

    /**
     * Synthesizes the job at the head of messageQueue. On completion (or
     * failure) the job is shifted off and processing continues with the
     * next entry until the queue drains.
     */
    processQueue: async () => {
        if (get().messageQueue.length === 0) return

        const job = get().messageQueue[0]
        if (job === null) {
            // End-of-stream marker: drop it. (An earlier revision closed the
            // service connection here once the queue drained.)
            get().messageQueue.shift()
            if (get().messageQueue.length === 0 && get().synthesizer) {
                // const connection = sdk.Connection.fromSynthesizer(theSynthesizer())
                // connection.close()
            }
        } else {
            get().synthesizer?.speakSsmlAsync(job.ssml, (result: sdk.SpeechSynthesisResult) => {
                const current = get().messageQueue[0]
                if (current && current.speak) {
                    if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted && result.audioDuration > 0) {
                        current.speak.audio.push(result.audioData)
                        get().speaker?.play(current.speak)
                    }
                    get().messageQueue.shift()
                    get().processQueue()
                }
            }, (err: string) => {
                // Don't swallow synthesis failures silently: log, then drop
                // the failed job and keep the queue moving.
                console.error('Speech synthesis failed:', err)
                get().messageQueue.shift()
                get().processQueue()
            })
        }
    },

    /**
     * Azure word-boundary callback. Records each word with its start time
     * and duration (converted from 100-ns ticks to milliseconds) into the
     * head job's SpeakerPack. Punctuation is glued onto the preceding word
     * rather than stored as its own entry.
     */
    handleWordBoundary: (_sender: sdk.SpeechSynthesizer, event: sdk.SpeechSynthesisWordBoundaryEventArgs) => {
        const current = get().messageQueue[0]
        if (!current || !current.speak) return
        const o = current.speak
        const word = event.text
        const time = event.audioOffset / 10000      // 100-ns ticks -> ms
        const duration = event.duration / 10000     // 100-ns ticks -> ms
        if (event.boundaryType === 'PunctuationBoundary' && o.words.length) {
            o.words[o.words.length - 1] += word
        } else if (event.boundaryType === 'WordBoundary' || event.boundaryType === 'PunctuationBoundary') {
            o.words.push(word)
            o.wtimes.push(time)
            o.wdurations.push(duration)
        }
    },

    /**
     * Azure viseme callback. Maintains the parallel visemes/vtimes/vdurations
     * arrays in the head job's SpeakerPack: each viseme gets a provisional
     * 75 ms duration which is stretched to the actual gap once the next
     * viseme arrives. A '\n' entry (presumably a break marker in visemeMap —
     * confirm against @/config/speech) is discarded when followed by another
     * viseme. FacialExpression blend-shape frames are collected separately.
     */
    handleVisemeReceived: (_sender: sdk.SpeechSynthesizer, event: sdk.SpeechSynthesisVisemeEventArgs) => {
        const current = get().messageQueue[0]
        if (!current || !current.speak) return
        const o = current.speak
        const viseme = visemeMap[event.visemeId]
        const time = event.audioOffset / 10000      // 100-ns ticks -> ms
        if (o.vdurations.length) {
            if (o.visemes[o.visemes.length - 1] === '\n') {
                // Drop the break placeholder entirely.
                o.visemes.pop()
                o.vtimes.pop()
                o.vdurations.pop()
            } else {
                // Extend the previous viseme up to this one's start time.
                o.vdurations[o.vdurations.length - 1] = time - o.vtimes[o.vtimes.length - 1]
            }
        }
        if (event.animation.length > 0) {
            o.animations.push(JSON.parse(event.animation))
        }

        o.visemes.push(viseme)
        o.vtimes.push(time)
        o.vdurations.push(75)   // provisional; fixed up by the next event
    },
}))