/*
 * @Author: wukunling 13022195532@163.com
 * @Date: 2025-01-22 09:29:32
 * @LastEditors: wukunling 13022195532@163.com
 * @LastEditTime: 2025-02-07 19:11:18
 * @FilePath: /config/record-lpcm16.js
 * @Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
 */
const recorder = require('node-record-lpcm16');
const fs = require('fs');
const request = require('request');
const path = require('path');
const axios = require('axios')
const Nls = require("alibabacloud-nls")
var player = require('play-sound')()
// --- Speech service configuration ---
// SECURITY: credentials are hard-coded below; move them to environment
// variables (e.g. process.env.NLS_APPKEY / NLS_TOKEN) before shipping.
const appkey = '6PNfxknvtOhFpCPE';
const token = '828ef1078e3d462abce8fabad3fe022a';
// RESTful one-shot speech-recognition (ASR) endpoint.
const urls = 'https://nls-gateway-cn-shanghai.aliyuncs.com/stream/v1/asr';
// WebSocket endpoint used for speech synthesis (TTS).
const URL = "wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1"
// Microphone recording is written here; cleared and re-created every cycle.
const audioFile = './output.wav';
// Synthesized reply audio is dumped here before playback.
const oneFiles = './one.wav';
// Audio parameters forwarded to the recognizer as URL query parameters.
const format = 'pcm';
const sampleRate = '16000'; // kept as a string: only ever used in URL building
const enablePunctuationPrediction = true;
const enableInverseTextNormalization = true;
const enableVoiceDetection = false;
const { setTimeout } = require('timers/promises');
const sleep = (waitTimeInMs) => new Promise(resolve => setTimeout(resolve, waitTimeInMs))

// Counts how many TTS utterances have been spoken (incremented in
// textToVoice; never read elsewhere in this file).
let loadIndex = 0



/**
 * Polling control flag. NOTE(review): the loop in run() breaks when this is
 * TRUE, so with the initial value `true` each run() performs exactly one
 * poll — confirm whether `if (!isRunning)` was intended.
 */
let isRunning = true;

// Writable stream receiving the raw recorded audio (re-created in run()).
let outputFile = fs.createWriteStream(audioFile);

// Start capturing microphone audio as soon as the module loads.
// NOTE(review): assumes the node-record-lpcm16 v2+ API where record()
// returns an object exposing stream() — verify installed version.
const recording = recorder.record()


/**
 * Return the size of a recorded audio file in megabytes.
 *
 * @param {string} [filePath=audioFile] - path to inspect; defaults to the
 *     module-level recording file. (The original hard-coded './output.wav',
 *     which could silently drift from `audioFile` if that constant changed.)
 * @returns {number} file size in MB (may be fractional)
 * @throws if the file does not exist (propagated from fs.statSync)
 */
const fileSize = (filePath = audioFile) => {
    const fileSizeInBytes = fs.statSync(filePath).size;
    return fileSizeInBytes / 1024 / 1024;
}


/**
 * (Re)start recording after wiring the live capture stream into `outputFile`.
 * run() invokes this with no arguments, so `error`/`result` are always
 * undefined there and the success branch runs unconditionally.
 *
 * NOTE(review): `recording.start(...)` is not part of the documented
 * node-record-lpcm16 v2 API (which exposes stream()/stop()/pause()/resume(),
 * with options passed to recorder.record()) — confirm against the installed
 * version.
 */
function recordCallback(error, result) {
    if (error) {
        console.error('录音失败:', error);
    } else {
        console.log('录音成功');
        // Pipe the live capture stream into the current output file.
        recording.stream().pipe(outputFile)
        //   fs.writeFileSync(audioFile, result);
       
        // Restart recording with explicit audio parameters.
        recording.start({
            sampleRate: 16000, // sample rate (Hz)
            channels: 1,       // channel count
            bitDepth: 16,      // bit depth
        });
       
    }
}




/**
 * One polling cycle: wipe the previous recording, restart capture, wait a
 * fixed interval, then send the captured audio off for recognition.
 *
 * Overall flow: run() -> voiceToText() -> process() -> callback() ->
 * aichat() -> textToVoice() -> (playback finished) -> run() again.
 */
async function run() {
    // Remove last cycle's audio. `force: true` keeps the very first cycle
    // from crashing with ENOENT when ./output.wav does not exist yet.
    fs.rmSync(audioFile, { force: true });
    console.log('删除文件')
    outputFile = fs.createWriteStream(audioFile);

    // Invoked with no args, so this unconditionally re-wires and restarts capture.
    recordCallback()

    while (true) {
        console.log('Polling...', new Date());
        await setTimeout(5000); // record for 5 seconds before recognizing

        await voiceToText()
        console.log('Stoping...', new Date());
        // NOTE(review): isRunning is always true, so the loop exits after a
        // single iteration; the next cycle is scheduled from textToVoice()'s
        // playback callback. If continuous polling was intended this should
        // probably read `if (!isRunning)` — confirm before changing.
        if (isRunning) {
            console.log('Break...', new Date());
            break;
        }
    }
}

run(); // kick off the first polling cycle

/**
 * POST the recorded audio file to the ASR REST endpoint; the result is
 * delivered asynchronously to callback().
 *
 * NOTE: this function shadows Node's global `process`; the name is kept
 * unchanged so the existing call in voiceToText() keeps working.
 *
 * @param {string} requestUrl - fully-built recognition URL (query included)
 * @param {string} token - NLS access token, sent via the X-NLS-Token header
 * @param {string} audioFile - path of the audio file to upload
 */
function process(requestUrl, token, audioFile) {
    // Read the whole audio file into memory; bail out (with a log) on failure.
    let audioContent;
    try {
        audioContent = fs.readFileSync(audioFile);
    } catch (error) {
        if (error.code === 'ENOENT') {
            console.log(error, 'The audio file is not exist!');
        } else {
            // Previously any non-ENOENT read error was swallowed silently.
            console.log(error, 'Failed to read the audio file!');
        }
        return;
    }

    // Raw octet-stream upload; the recognizer needs an explicit length.
    const httpHeaders = {
        'X-NLS-Token': token,
        'Content-type': 'application/octet-stream',
        'Content-Length': audioContent.length
    };

    const options = {
        url: requestUrl,
        method: 'POST',
        headers: httpHeaders,
        body: audioContent
    };

    // `request` is callback-based; recognition results land in callback().
    request(options, callback);
}


/**
 * Send the recognized speech text to the DeepSeek chat API and speak the
 * reply. An empty or default "How can I assist you today" reply skips
 * synthesis and starts the next polling cycle instead.
 *
 * @param {string} text - user utterance transcribed by the recognizer
 */
const aichat = (text) => {
    const payload = {
        "model": "deepseek-chat",
        "messages": [
            {
                "role": "user",
                "content": text
            }
        ],
        "stream": false
    };

    const requestConfig = {
        method: 'post',
        url: 'https://api.deepseek.com/chat/completions',
        headers: {
            // SECURITY: hard-coded API key — should be moved to configuration.
            'Authorization': 'Bearer sk-14be660432eb40f1ab0f609e37a3ee1a',
            'Content-Type': 'application/json'
        },
        data: JSON.stringify(payload)
    };

    axios(requestConfig)
        .then((response) => {
            // Strip all whitespace so the default-answer check is robust.
            const reply = response.data.choices[0].message.content.replace(/\s+/g, '');
            console.log(reply, '输出语音内容===');
            const isDefaultAnswer = !reply || reply.includes('HowcanIassistyoutoday');
            if (isDefaultAnswer) {
                run();
            } else {
                textToVoice(reply);
            }
        })
        .catch((error) => {
            console.log(error);
        });
}




/**
 * Completion handler for the ASR upload performed in process().
 * Logs the outcome and, on a successful recognition (HTTP 200 with NLS
 * status 20000000), forwards the transcript to aichat().
 *
 * @param {?Error} error - transport error from `request`, or null
 * @param {object} response - HTTP response; only statusCode is inspected
 * @param {string} body - raw response body (JSON expected on HTTP 200)
 */
function callback(error, response, body) {
    if (error != null) {
        console.log(error);
        return;
    }

    console.log('The audio file recognized result:');
    console.log(body);

    if (response.statusCode !== 200) {
        console.log('The audio file recognized failed, http code: ' + response.statusCode);
        return;
    }

    let parsed;
    try {
        // Guard the parse: a malformed body previously crashed the process.
        parsed = JSON.parse(body);
    } catch (parseError) {
        console.log('The audio file recognized failed, invalid JSON body:', parseError);
        return;
    }

    if (parsed.status === 20000000) {
        console.log('result: ' + parsed.result);
        aichat(parsed.result)
        console.log('The audio file recognized succeed!');
    } else {
        console.log('The audio file recognized failed!');
    }
}


/**
 * Speech-to-text: build the recognition request URL from the module-level
 * settings and upload the current recording.
 *
 * Note: process() is callback-based and returns undefined, so this `await`
 * only waits until the request has been *sent*; recognition results arrive
 * later in callback().
 */
async function voiceToText() {
    // Assemble query parameters; URLSearchParams handles escaping and avoids
    // the previous manual string concatenation.
    const params = new URLSearchParams({
        appkey,
        format,
        sample_rate: sampleRate,
    });
    if (enablePunctuationPrediction) {
        params.set('enable_punctuation_prediction', 'true');
    }
    if (enableInverseTextNormalization) {
        params.set('enable_inverse_text_normalization', 'true');
    }
    if (enableVoiceDetection) {
        params.set('enable_voice_detection', 'true');
    }
    const requestUrl = `${urls}?${params.toString()}`;

    await process(requestUrl, token, audioFile);
}



/**
 * Text-to-speech: synthesize `line` via the NLS WebSocket TTS service, dump
 * the streamed audio into `oneFiles`, play it, and once playback finishes
 * truncate the recording file and start the next polling cycle (run()).
 *
 * @param {string} line - text to synthesize
 */
async function textToVoice(line) {
    console.log(`speak: ${line}`)
    // Utterance counter (not read anywhere else in this file).
    loadIndex++

    // Overwrite any previously synthesized audio.
    let dumpFile = fs.createWriteStream(oneFiles, { flags: "w" })
    let tts = new Nls.SpeechSynthesizer({
        url: URL,
        appkey: appkey,
        token
    })

    tts.on("meta", (msg) => {
        console.log("Client recv metainfo:", msg)
    })

    // Streamed audio chunks are appended to the dump file as they arrive.
    tts.on("data", (msg) => {
        console.log(`recv size: ${msg.length}`)
        dumpFile.write(msg, "binary")
        //   audio.kill()
    })

    tts.on("completed", (msg) => {
        console.log("Client recv completed:", msg)
    })

    // When the socket closes, play the synthesized file, then clear the
    // recording file and schedule the next polling cycle.
    // NOTE(review): plays 'one.wav' (cwd-relative) while the dump path is
    // './one.wav' — the same file only when cwd matches; confirm.
    tts.on("closed", () => {
        console.log("Client recv closed")
        player.play('one.wav', async (err) => {
            console.log('播放完成')
            fs.writeFile(audioFile, '', err => {
                if (err) throw err;
                console.log('文件已清空');
                run()
              });
            if (err) throw err
        })
    })

    tts.on("failed", (msg) => {
        console.log("Client recv failed:", msg)
    })

    let param = tts.defaultStartParams()
    // Text to synthesize
    param.text = line
    // Voice name
    param.voice = "aixia"
    // Pitch, range -500..500, optional, default 0
    // param.pitch_rate = 100
    // Speech rate, range -500..500, default 0
    // param.speech_rate = 100
    // Returned audio encoding format
    // param.format = "wav"
    // Returned audio sample rate
    // param.sample_rate = 16000
    // Enable word-level timestamps
    // param.enable_subtitle = true
    try {
        // Start synthesis; waits for completion with a 6 s timeout.
        await tts.start(param, true, 6000)
    } catch (error) {
        console.log("error on start:", error)
        return
    } finally {
        //dumpFile.end()
    }
    console.log("synthesis done")
    await sleep(1000)
}





 
/**
 * Check that a WAV file contains audio data by inspecting its 44-byte
 * canonical header (assumes the common PCM layout where the data-chunk
 * size is a 32-bit LE integer at byte offset 40).
 *
 * @param {string} filePath - path of the WAV file to inspect
 * @returns {Promise<boolean>} resolves true when the data chunk size is
 *     non-zero; false for files that are too short or have no audio data
 * @rejects on open/read/close I/O errors
 */
function checkWavFile(filePath) {
    return new Promise((resolve, reject) => {
        fs.open(filePath, 'r', (openErr, fd) => {
            if (openErr) {
                reject(openErr);
                return;
            }

            const header = Buffer.alloc(44); // canonical WAV header size
            fs.read(fd, header, 0, 44, 0, (readErr, bytesRead) => {
                // Always close the descriptor before reporting the outcome.
                fs.close(fd, (closeErr) => {
                    if (readErr) {
                        // Previously the read error was silently ignored.
                        reject(readErr);
                        return;
                    }
                    if (closeErr) {
                        reject(closeErr);
                        return;
                    }
                    if (bytesRead < 44) {
                        resolve(false); // too short to hold a full WAV header
                        return;
                    }

                    const dataChunkSize = header.readUInt32LE(40);
                    if (dataChunkSize === 0) {
                        resolve(false); // header only, no audio samples
                        return;
                    }
                    console.log(`dataChunkSize: ${dataChunkSize}`)
                    resolve(true);
                });
            });
        });
    });
}
 
