import ClientJs from '../../base/clientjs.js'

// Enumeration of the standard WebSocket readyState values
const WebSocketState = {
    CONNECTING: 0,// connection not yet open
    OPEN: 1,// connection open and ready to communicate
    CLOSING: 2,// connection is in the process of closing
    CLOSED: 3// connection closed or could not be established
};
// const webSocketUrl = process.env.VITE_AI_AUDIO_WEBSOCKET_URL;
//ws://172.32.147.51:8080/ws/ai/v2t?sessionId=test001
//ws://172.32.149.247:8080
// const webSocketUrl = 'wss://172.32.151.221:9090/ws/ai/v2t?sessionId=test002';//&aiApiCode=345&aiApiKey=key**7
// Default speech-to-text WebSocket endpoint (hard-coded dev address — should come from config)
const webSocketUrl = 'ws://172.32.149.247:8080/ws/ai/v2t?sessionId=test002';
// const webSocketUrl = 'wss://172.32.151.221:443/ws/ai/v2t'




/**
 * Utility class that uploads an audio stream to the AI platform over a
 * WebSocket connection and collects the speech-recognition result.
 */
class AudioStreamer {
    /**
     * @param {number} chunkSize - size in bytes of each audio slice per message (default 1280)
     * @param {number} connectLimit - maximum number of WebSocket connection attempts (default 5)
     */
    constructor(chunkSize = 1280, connectLimit = 5) {
        this.url = webSocketUrl;          // WebSocket server address (module-level default)
        this.chunkSize = chunkSize;
        this.ws = null;
        this._voiceId = this.getRandomStr();
        this.resultMap = {};              // recognition results keyed by voice id
        this.chunkIndex = 0;              // number of chunks sent so far
        this.interval = 0;                // legacy upload interval in ms (timer path removed)
        this.timer = null;
        this.connectCnt = 1;
        this.connectLimit = connectLimit;
        this.audioResult = '';            // parsed recognition result of the audio stream
        this.getAutioResultFlg = false;   // true once a callback has fired ("Autio" typo kept for compatibility)
    }

    /**
     * Split a base64-encoded audio stream into base64-encoded slices of at
     * most `chunkSize` raw bytes each.
     * @param {string} audioStream - base64-encoded audio data
     * @returns {string[]} base64-encoded chunks
     */
    chunk(audioStream) {
        const chunks = [];
        // Decode base64 into a raw byte array.
        const binaryString = atob(audioStream);
        const bytes = new Uint8Array(binaryString.length);
        for (let i = 0; i < binaryString.length; ++i) {
            bytes[i] = binaryString.charCodeAt(i);
        }
        // Re-encode each chunkSize-byte slice back to base64.
        for (let i = 0; i < bytes.length; i += this.chunkSize) {
            const slice = bytes.subarray(i, i + this.chunkSize);
            chunks.push(btoa(String.fromCharCode.apply(null, slice)));
        }
        return chunks;
    }

    /**
     * Build one upload message for the recognition service.
     * @param {string} mesbase64 - mono audio as base64; service recommends 1280 bytes every 40 ms
     * @param {boolean} endFlag - false while streaming; true on the last chunk to close the session
     * @returns {{sessionParam: object, samples: string, endFlag: boolean}}
     */
    setAudioMessage(mesbase64 = '', endFlag = false) {
        const audioSessionParam = {
            aue: 'raw',   // audio encoding: one of [raw, opus, opus-wb, opus-swb]
            rst: 'plain', // result format [json, plain]; server defaults to json if omitted
            eos: '600000',// max silence between two utterances (ms)
            bos: '600000' // max silence from stream start to first utterance (ms)
        };
        return {
            sessionParam: audioSessionParam, // only required on the first message; ignored afterwards
            samples: mesbase64,
            endFlag: endFlag
        };
    }

    /**
     * Open the WebSocket connection and upload every audio chunk, marking the
     * last one with endFlag=true so the server finalizes the session.
     * @param {string[]} chunks - base64-encoded audio slices from chunk()
     */
    sendAudio(chunks) {
        let url = this.url;
        const sessionId = this.getRandomStr(100000000);
        // Append a fresh sessionId to the query string.
        url += ~url.indexOf('?') ? '&' : '?';
        url += `sessionId=${sessionId}`;
        console.log('websocketUrl==' + url);
        try {
            // BUGFIX: connect with the url carrying the generated sessionId;
            // previously `this.url` was passed, silently discarding it.
            this.ws = new WebSocket(url);
        } catch (e) {
            this.orgCallback('无法连接语音识别服务器，请稍候尝试');
            return;
        }

        this.ws.onopen = () => {
            console.info('=====onopen websocket====');
            chunks.forEach((item, idx) => {
                // BUGFIX: the last chunk must carry endFlag=true. The old code
                // compared this.chunkIndex (pre-increment) with chunks.length,
                // so the flag was never sent and the session never closed.
                const isLast = idx === chunks.length - 1;
                const mes = this.setAudioMessage(item, isLast);
                this.ws.send(JSON.stringify(mes));
                this.chunkIndex++;
            });
        };

        /**
         * Recognition result pushed back by the server, e.g.
         * {"endFlag":false,"errorCode":0,"errorMsg":"成功","result":"抗诉录音了呀","sid":"..."}
         */
        this.ws.onmessage = (event) => {
            console.log("come from client:Message from server ", event.data);
            const map = this.resultMap;
            let resultObj = null;
            try {
                resultObj = event.data && JSON.parse(event.data);
            } catch (e) {
                // Non-JSON payload: fall through to the failure branch below.
            }

            if (resultObj && resultObj.errorCode == 0) { // recognition succeeded
                // Results arrive incrementally; concatenate the partial texts.
                const result = resultObj.result;
                map[this._voiceId] = map[this._voiceId] ? map[this._voiceId] + result : result;
            } else {
                // BUGFIX: guard resultObj before reading errorMsg (it can be null here).
                map[this._voiceId] = (resultObj && resultObj.errorMsg) || '未能正确识别，请尝试调大音量';
            }

            // The server answers in several messages; delay the callback 20 ms
            // so consecutive partial results get merged first.
            setTimeout(() => {
                this.orgCallback();
            }, 20);
        };

        this.ws.onerror = (e) => {
            console.info('on websocket error');
            // If no successful callback has happened yet, report on error.
            if (!this.getAutioResultFlg) {
                this.orgCallback();
            }
        };

        this.ws.onclose = (e) => {
            // Same fallback on close: make sure the caller always gets an answer.
            if (!this.getAutioResultFlg) {
                this.orgCallback();
            }
        };
    }

    /**
     * Invoke the configured business callback with the accumulated result.
     * @param {string} [msg] - fallback error message used when no text was recognized
     */
    orgCallback(msg) {
        let voiceTxt = this.resultMap[this._voiceId];
        const code = voiceTxt ? '0' : '-1';
        if (code == '-1') {
            voiceTxt = msg || '未能识别到您的语音，请尝试调大音量';
        }
        const result = {
            code: code,
            voiceTxt: voiceTxt
        };
        // Resolve via the prototype chain: finds both an own property and one
        // assigned onto __proto__ by the callers (backward compatible with the
        // old explicit this.__proto__.callBackMethod read).
        const callBackFn = this.callBackMethod;
        if (callBackFn) {
            this.getAutioResultFlg = true; // result delivered
            if (typeof window[callBackFn] === "function") {
                window[callBackFn](result); // registered by global name
            } else if (typeof callBackFn === "function") {
                callBackFn(result); // passed as a function reference
            }
        }
    }

    /**
     * Generate a short random hex string used as a session/voice id.
     * @param {number} range - upper-bound seed (default 100000)
     * @returns {string}
     */
    getRandomStr(range = 100000) {
        // NOTE(review): parseInt(range, 16) reads the decimal default as hex
        // (100000 -> 1048576); kept as-is since the ids only need uniqueness.
        const rangeNum = parseInt(range, 16);
        return (((1 + Math.random()) * rangeNum) | 0).toString(16).substring(1);
    }

    /**
     * Override the WebSocket server address.
     * @param {string} url
     */
    setWebSocketUrl(url) {
        this.url = url;
    }
}



// Callback invoked by the native client with the captured voice data.
// `res` is a JSON string { retCode, retMsg, voiceFile }; voiceFile is the
// recorded audio as base64 (16 kHz WAV per the original comment — TODO confirm
// against the client documentation).
const getVoiceCbFn = (res) => {
    let result;
    try {
        result = JSON.parse(res);
    } catch (e) {
        // BUGFIX: guard against a malformed payload instead of throwing out of the callback.
        alert('获取语音失败，' + e.message);
        return;
    }
    const { retCode, retMsg, voiceFile } = result;
    console.info('getVoiceCb------');
    if (retCode == 0 && voiceFile) { // client returned audio successfully
        const audioS = new AudioStreamer();
        const chunks = audioS.chunk(voiceFile); // slice the base64 audio string
        // Read via the prototype chain so it works whether the business callback
        // was stored as an own property or on __proto__ by reqClientRecord.
        // FIXME: writing to audioS.__proto__ mutates AudioStreamer.prototype and
        // leaks state across instances; kept because orgCallback reads it there.
        audioS.__proto__.callBackMethod = window['getVoiceCb'].callBackMethod;
        audioS.chunkIndex = 0;
        chunks && audioS.sendAudio(chunks); // upload over WebSocket
    } else {
        alert('获取语音失败，' + retMsg);
    }
};
// Let the native client record speech, then send it to the TianYan speech
// service for recognition; `callBackFn` receives the recognized text.
const reqClientRecord = (callBackFn) => {
    window['getVoiceCb'] = getVoiceCbFn;

    // When callBackFn names a globally registered function, remember it so the
    // recognition result can be routed back to it later.
    const hasGlobalFn = callBackFn && typeof window[callBackFn] === "function";
    if (hasGlobalFn) {
        // FIXME: getVoiceCbFn's __proto__ is Function.prototype, so this
        // assignment is visible on every function object; preserved because
        // getVoiceCbFn reads the value back from that exact location.
        window['getVoiceCb'].__proto__.callBackMethod = callBackFn;
    }
    ClientJs.openVoiceRecord('getVoiceCb');
};

// Send audio captured in the H5 page to the TianYan recognition service.
// voiceFile: base64 audio; websocketUrl: optional server override;
// callBackMethod: global function name or function reference for the result.
const reqH5Voice = (voiceFile, websocketUrl, callBackMethod) => {
    const streamer = new AudioStreamer();
    if (websocketUrl) {
        streamer.setWebSocketUrl(websocketUrl);
    }
    const chunks = streamer.chunk(voiceFile); // slice the base64 audio string
    // NOTE: assigning via __proto__ mutates AudioStreamer.prototype (shared by
    // all instances) — preserved because orgCallback resolves it from there.
    streamer.__proto__.callBackMethod = callBackMethod;
    streamer.chunkIndex = 0;
    chunks && streamer.sendAudio(chunks); // upload over WebSocket
};

// Public API of this module
export {
    reqClientRecord,
    reqH5Voice,
    AudioStreamer
}