import { computed, ref } from "vue"
import { webmFixDuration } from "./BlobFix"
import { watch } from "vue"
import getBrowserInfo from "browser-tool"

// Recognizer lifecycle: "not-ready" → waiting for mic/worker initialization;
// "leisure" → idle, safe to start(); "recording" → capturing audio;
// "processing" → transcribing after stop() until the result resolves.
export type Status = "not-ready" | "leisure" | "recording" | "processing"

// Web Speech API (browser-native speech recognition)

// WebKit/Blink-prefixed SpeechRecognition constructor. Only `declare`d (no
// lib.dom typing for the prefixed name), so it is typed `any` and may not
// exist at runtime in non-Blink browsers.
declare const webkitSpeechRecognition: any

/**
 * Speech recognition backed by the browser's (webkit-prefixed) Web Speech API.
 *
 * Returns `{ status, start, stop }`; `stop()` resolves with the transcript
 * accumulated since `start()` (joined with spaces, "" if nothing was heard).
 *
 * @throws Error at setup time when the browser lacks the Web Speech API.
 */
export function useSpeechRecognition() {
    const results = ref([] as string[])
    const status = ref("leisure" as Status)

    // `webkitSpeechRecognition` is only `declare`d above: in a browser that
    // does not define it, merely reading the identifier throws a
    // ReferenceError, so a truthiness check (`!webkitSpeechRecognition`)
    // never reaches the intended throw. A `typeof` guard is safe.
    if (typeof webkitSpeechRecognition === "undefined") {
        throw new Error("browser does not support the Web Speech API")
    }

    const speechRecognition = new webkitSpeechRecognition()
    speechRecognition.continuous = true
    speechRecognition.lang = "en"

    speechRecognition.addEventListener("result", (e: any) => {
        // Snapshot the best (first) alternative of every result so far.
        const r = e.results as SpeechRecognitionResultList
        const resultTextArray = [] as string[]
        for (const result of r) {
            resultTextArray.push(result.item(0).transcript)
        }
        results.value = resultTextArray
    })

    // On any engine error, drop back to idle so start() can be retried.
    speechRecognition.addEventListener("error", () => {
        status.value = "leisure"
    })

    /** Begin a new recording session; clears previous results. */
    function start() {
        if (status.value !== "leisure") {
            throw new Error(`status is ${status.value} , can't start`)
        }
        status.value = "recording"
        results.value = []
        speechRecognition.start()
    }

    /** Stop recording and resolve with the joined transcript. */
    function stop(): Promise<string> {
        if (status.value !== "recording") {
            throw new Error("not start record , can't stop")
        }

        status.value = "processing"
        speechRecognition.stop()

        return new Promise((resolve) => {
            const finish = () => {
                // join("") on an empty array yields "", so no special case
                // is needed for "nothing recognized".
                resolve(results.value.join(" "))
                status.value = "leisure"
            }
            if (results.value.length < 1) {
                // The final result event may arrive shortly after stop();
                // give the engine a 1 s grace period before reporting "".
                setTimeout(finish, 1000)
            } else {
                finish()
            }
        })
    }

    return {
        status,
        start,
        stop,
    }
}

// Transformers.js AI automatic speech recognition

// Module-level singleton factory: the worker and its ready flag inside the
// closure are created once and shared by every component that calls this.
export const useAutomaticSpechRecognition = getUseAutomaticSpechRecognition()

/**
 * Builds the Transformers.js-based recognizer composable.
 *
 * The returned function records microphone audio with MediaRecorder, decodes
 * it to 16 kHz PCM, and posts the samples to a module worker (./worker.js)
 * that replies with `{ text }`. The worker is created lazily on first use and
 * shared across all callers.
 */
function getUseAutomaticSpechRecognition() {
    const workerReady = ref(false)
    let worker: Worker

    // Start the ASR worker. Its first message is either the literal "ready"
    // handshake or an initialization error payload.
    function initWorker() {
        worker = new Worker(new URL("./worker.js", import.meta.url), {
            type: "module",
        })
        worker.addEventListener(
            "message",
            (e) => {
                if (e.data == "ready") {
                    workerReady.value = true
                } else {
                    alert("初始化语音识别失败，请刷新重试或检查网络")
                    console.log("初始化语音识别失败：")
                    throw e.data
                }
            },
            { once: true }
        )
    }

    return () => {
        if (worker == undefined) {
            initWorker()
        }

        let recorder: MediaRecorder

        const status = ref("not-ready" as Status)
        const recorderReady = ref(false)
        const startTime = ref(0)
        const chunks = ref([] as Blob[])

        // Usable only once both the microphone recorder and the worker are up.
        const ready = computed(() => recorderReady.value && workerReady.value)

        watch(
            () => ready.value,
            (ready) => {
                if (ready) {
                    status.value = "leisure"
                }
            }
        )

        navigator.mediaDevices
            .getUserMedia({ audio: true })
            .then((stream) => {
                recorder = new MediaRecorder(stream, {
                    mimeType: getMimeType(),
                })

                recorder.addEventListener("dataavailable", async (result) => {
                    if (result.data.size > 0) {
                        chunks.value.push(result.data)
                    }
                    // "inactive" means stop() was called and this is the
                    // final chunk: assemble, decode, and ship to the worker.
                    if (recorder.state === "inactive") {
                        const duration = Date.now() - startTime.value

                        let blob = new Blob(chunks.value, {
                            type: recorder.mimeType,
                        })

                        // Chrome's webm blobs carry no duration metadata;
                        // patch it in so decoding sees the full clip.
                        if (recorder.mimeType === "audio/webm") {
                            blob = await webmFixDuration(
                                blob,
                                duration,
                                blob.type
                            )
                        }

                        chunks.value = []

                        const fileReader = new FileReader()
                        fileReader.addEventListener("loadend", async () => {
                            // 16 kHz is the sample rate the ASR model expects.
                            const audioCTX = new AudioContext({
                                sampleRate: 16000,
                            })
                            const arrayBuffer =
                                fileReader.result as ArrayBuffer
                            const decoded = await audioCTX.decodeAudioData(
                                arrayBuffer
                            )
                            // Mono: only the first channel is transcribed.
                            worker.postMessage(decoded.getChannelData(0))
                        })
                        fileReader.readAsArrayBuffer(blob)
                    }
                })

                recorderReady.value = true
            })
            .catch((err) => {
                // Permission denied / no microphone: without this handler the
                // composable silently stayed in "not-ready" forever.
                console.error("getUserMedia failed:", err)
            })

        /** Begin recording; requires both recorder and worker to be ready. */
        function start() {
            if (!ready.value) {
                throw new Error("not ready")
            }
            if (status.value !== "leisure") {
                throw new Error(`can't start. because status = ${status.value}`)
            }
            status.value = "recording"
            startTime.value = Date.now()
            recorder.start()
        }

        /** Stop recording; resolves with the worker's transcription text. */
        function stop() {
            if (status.value !== "recording") {
                throw new Error("not start recording")
            }
            status.value = "processing"
            let resolve: (e: any) => void
            const promise: Promise<string> = new Promise((r) => (resolve = r))

            // { once: true } is essential: the original added a permanent
            // listener per stop() call, leaking listeners that kept firing on
            // every later transcription and resetting `status`.
            worker.addEventListener(
                "message",
                (e) => {
                    resolve(e.data.text)
                    status.value = "leisure"
                },
                { once: true }
            )

            recorder.stop()
            return promise
        }

        return {
            status,
            start,
            stop,
        }
    }

    // First audio container the current browser's MediaRecorder supports,
    // or undefined (letting MediaRecorder pick its default).
    function getMimeType() {
        const types = [
            "audio/webm",
            "audio/mp4",
            "audio/ogg",
            "audio/wav",
            "audio/aac",
        ]
        return types.find((t) => MediaRecorder.isTypeSupported(t))
    }
}

// Automatically select an ASR implementation

/**
 * Pick the ASR backend for the current browser: desktop Blink engines get the
 * native Web Speech API; everything else falls back to the in-browser
 * Transformers.js recognizer.
 */
export function useASR() {
    const { device, engine } = getBrowserInfo()
    const nativeSupported = device == "PC" && engine == "Blink"
    return nativeSupported
        ? useSpeechRecognition()
        : useAutomaticSpechRecognition()
}
