import {Button, Empty, Popover, TextArea} from "@douyinfe/semi-ui";
import {useEffect, useRef, useState} from "react";
import {IconMicrophone, IconStop} from '@douyinfe/semi-icons';
import {
    analysisSlice,
    getSpeechBaseSlice,
    getSpeechDiseaseSlice,
    getSpeechHabitSlice, getSpeechMedicalSlice, getSpeechMentalSlice
} from "../../../api/ApiSpeechAnalysis.ts";
// Manually declare the Web Speech API constructors on `window`.
// They are not part of the standard DOM lib typings (Chrome ships the
// `webkit`-prefixed constructor), so this augmentation silences IDE/compiler
// errors when accessing them below.
declare global {
    interface Window {
        SpeechRecognition: any;
        webkitSpeechRecognition: any;
    }
}

//===============================
// NOTE: Type errors reported in this file are expected (the Web Speech API
// has no standard typings) and can be safely ignored.
//===============================

/**
 * Speech-to-text popover for AI-assisted form filling.
 *
 * Records the user's voice locally via the browser's SpeechRecognition API,
 * shows the live transcript, then sends the transcript to several analysis
 * endpoints. Every recognized field is written into the parent form through
 * `props.setFormValue(key, value)`.
 *
 * @param props.setFormValue callback that writes one recognized field
 *                           (form key -> string | number value) into the form
 */
const STTPopOver = (props: { setFormValue: (key: string, value: string | number) => void }) => {
    const [visible, setVisible] = useState(false);          // popover open/closed
    const [isListening, setListening] = useState(false);    // recognition in progress
    const [showUpload, setShowUpload] = useState(false);    // show the "analyze" button
    const [upload, setUpload] = useState('AI填表');          // analyze-button label
    const [transcript, setTranscript] = useState('请根据表单的标题与内容说出您的情况，您的声音将在本地进行解析，请放心使用！'+
        `AI会自动识别您的相关信息填入表格中。
                
您可以这样说：
我是男/女的，今年多少岁，多高，体重多少公斤，有没有上过学，高中还是本科硕士
喝不喝酒？之前有没有过什么病？最近记忆好不好，睡得好不好？
每周运动几小时，吃饭吃得下吗？记忆有问题吗？生活还能自理吗？`
    );
    // The SpeechRecognition instance lives in a ref so it survives re-renders.
    const recognition = useRef<any | null>(null);
    useEffect(() => {
        // Feature-detect: Chrome exposes the webkit-prefixed constructor.
        const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
        // Accumulated transcript for the current recording session (closure
        // state shared by the event handlers below).
        let recordText = "";
        if (SpeechRecognition) {
            recognition.current = new SpeechRecognition();
            recognition.current.lang = 'zh-CN';          // recognize Mandarin Chinese
            recognition.current.continuous = true;       // keep listening across pauses
            recognition.current.interimResults = false;  // deliver final results only

            // Append each finalized result segment to the running transcript.
            recognition.current.onresult = (event: any) => {
                const current = event.resultIndex;
                const result = event.results[current][0].transcript;
                recordText += result;
                setTranscript(recordText);
            };
            // Recognition started: reset session state and UI.
            recognition.current.onstart = () => {
                setListening(true);
                setTranscript("正在聆听中...");
                recordText = "";
                setShowUpload(false);
            };
            // Recognition ended: offer analysis if anything was heard.
            recognition.current.onend = () => {
                setListening(false);
                if (recordText !== "") {
                    setUpload("AI分析");
                    setShowUpload(true);
                } else {
                    setTranscript("不好意思，我刚刚没听清~");
                }
            };
            // BUG FIX: the handler receives the error event itself; the old
            // `({event}) => ...` destructuring read a non-existent `.event`
            // property, so `event.error` threw and no message was displayed.
            recognition.current.onerror = (event: any) => {
                if (event.error === "no-speech") {
                    setTranscript("没有听到任何声音");
                } else {
                    setTranscript(event.error);
                }
            };
        } else {
            setTranscript("浏览器暂不支持本功能");
        }
    }, []);

    /** Start a recording session (no-op if the API is unsupported). */
    const startRecognition = () => {
        if (recognition.current) {
            recognition.current.start();
        }
    };

    /** Stop the current recording session (triggers `onend`). */
    const stopRecognition = () => {
        if (recognition.current) {
            recognition.current.stop();
        }
    };

    /**
     * Parse one analysis response and write every usable field into the form.
     * Invokes `callback` on any failure path (rejected fetch, invalid JSON,
     * or an empty/unusable slice) so the caller can count failures.
     *
     * BUG FIX: the original chain had no `.catch` on the outer promise, so a
     * rejected fetch never reached `callback` and the retry state below was
     * unreachable on network errors.
     */
    const analysisJson = (promise: Promise<Response>, callback: () => void) => {
        promise
            .then((res) => res.json())
            .then((data) => {
                const json = analysisSlice(data);
                if (json) {
                    for (const key in json) {
                        const value = json[key];
                        console.log(key, value);
                        // Only write non-empty strings and numbers; the
                        // typeof guard narrows, so no cast is needed.
                        if ((typeof value === "string" && value !== "") || typeof value === "number") {
                            props.setFormValue(key, value);
                        }
                    }
                } else {
                    callback();
                }
            })
            .catch(() => callback());
    };

    /**
     * Send the transcript to all five slice endpoints in parallel.
     * If every one of them fails, relabel the button so the user can retry.
     */
    const uploadSpeech = () => {
        let failures = 0;
        const callback = () => {
            failures += 1;
            if (failures === 5) {
                setUpload("分析失败，点击重试");
            }
        };
        analysisJson(getSpeechBaseSlice(transcript), callback);
        analysisJson(getSpeechDiseaseSlice(transcript), callback);
        analysisJson(getSpeechHabitSlice(transcript), callback);
        analysisJson(getSpeechMentalSlice(transcript), callback);
        analysisJson(getSpeechMedicalSlice(transcript), callback);
    };

    const content = (
        <div style={{textAlign: "center", verticalAlign: "middle"}}>
            {/* Record toggle: microphone while idle, stop icon while listening */}
            <Button icon={!isListening ? <IconMicrophone size={"extra-large"}/> : <IconStop size={"extra-large"}/>}
                    theme="solid" style={{
                height: 60, width: 60, marginTop: 20,
            }} aria-label="录音" onClick={() => {
                if (isListening) {
                    stopRecognition();
                } else {
                    startRecognition();
                }
            }}/>
            {/* Caption under the record button */}
            <Empty
                title={!isListening ? "开始录音" : "结束录音"}
            />
            {/* Editable transcript so the user can correct recognition errors */}
            <TextArea placeholder='large' autosize style={{width: 400, margin: 10}} value={transcript}
                      onChange={(value) => setTranscript(value)}/>
            {/* Analyze button appears only after a non-empty recording */}
            {showUpload ? (
                <Button block type='primary' style={{padding: 10}} onClick={uploadSpeech}>{upload}</Button>) : ""}
        </div>
    );
    return (<Popover trigger="custom" position={"topRight"} visible={visible} content={content}>
        {/* Floating trigger button toggles the popover */}
        <Button icon={<IconMicrophone/>} theme={"solid"} onClick={() => setVisible(!visible)}
                style={{position: "absolute", bottom: 20, right: 0}}>AI辅助声音填表</Button>
    </Popover>);
};
export default STTPopOver