/**
 * 麦克风语音识别子线程
 * (Microphone speech-recognition worker thread: streams mic audio into a
 * sherpa-onnx online recognizer and posts recognized segments to the parent.)
 */

import portAudio from 'naudiodon2';
import sherpa_onnx from 'sherpa-onnx-node';
import path from 'path';
import { parentPort } from 'worker_threads';
import { fileURLToPath } from 'url';

// Recreate the CommonJS-style __filename/__dirname globals, which do not
// exist in ES modules, so model files can be resolved relative to this file.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

/**
 * Build a streaming (online) sherpa-onnx recognizer.
 *
 * Loads a transducer model (encoder/decoder/joiner ONNX files plus token
 * table) from the `model/` directory next to this file, decodes on CPU with
 * greedy search, and enables endpoint detection so utterance boundaries can
 * be observed via `recognizer.isEndpoint(stream)`.
 *
 * @returns {sherpa_onnx.OnlineRecognizer} a ready-to-use recognizer; call
 *   `createStream()` on it to obtain a decoding stream.
 */
function createOnlineRecognizer() {
    const modelDir = path.join(__dirname, 'model');

    return new sherpa_onnx.OnlineRecognizer({
        featConfig: {
            sampleRate: 16000,  // model expects 16 kHz input
            featureDim: 80,
        },
        modelConfig: {
            transducer: {
                encoder: path.join(modelDir, 'encoder-epoch-11-avg-1.onnx'),
                decoder: path.join(modelDir, 'decoder-epoch-11-avg-1.onnx'),
                joiner: path.join(modelDir, 'joiner-epoch-11-avg-1.onnx'),
            },
            tokens: path.join(modelDir, 'tokens.txt'),
            numThreads: 2,
            provider: 'cpu',
            debug: 1,
        },
        decodingMethod: 'greedy_search',
        maxActivePaths: 4,  // only used by modified_beam_search; harmless here
        enableEndpoint: true,
        // Endpoint rules (seconds): trailing silence after speech / after any
        // decoding / max utterance length before forcing an endpoint.
        rule1MinTrailingSilence: 2.4,
        rule2MinTrailingSilence: 1.2,
        rule3MinUtteranceLength: 20,
    });
}

// One long-lived recognizer and a single decoding stream; the microphone
// 'data' callback below feeds audio into `stream` for its whole lifetime.
const recognizer = createOnlineRecognizer();
const stream = recognizer.createStream();

let lastText = '';      // most recent recognized text (lowercased)
let segmentIndex = 0;   // count of completed (endpointed) segments
let dispose = true;     // true = no fresh text pending; set false when new text arrives

/**
 * Microphone capture stream: mono float32 samples at the recognizer's
 * expected sample rate, delivered in 1024-frame chunks via 'data' events.
 */
const ai = new portAudio.AudioIO({
    inOptions: {
        deviceId: -1,  // -1 (or omitting deviceId) selects the default input device
        channelCount: 1,
        sampleRate: recognizer.config.featConfig.sampleRate,
        sampleFormat: portAudio.SampleFormatFloat32,
        framesPerBuffer: 1024,
        closeOnError: true,  // close the stream on audio error instead of only logging it
    },
});

const display = new sherpa_onnx.Display(50);

// Feed each captured audio chunk into the recognizer; when an endpoint is
// detected, emit the finished segment to the parent thread (or to the
// console when this file is run directly rather than as a worker).
ai.on('data', data => {
    // `data` is a Node Buffer. Buffers of <= 4 KiB are carved out of a shared
    // 8 KiB pool, so `data.buffer` (the underlying ArrayBuffer) is usually
    // LARGER than this chunk and does not start at offset 0. Viewing the
    // whole ArrayBuffer — `new Float32Array(data.buffer)` — would hand the
    // recognizer unrelated pool bytes at the wrong length; honor the view's
    // byteOffset and byteLength instead.
    const samples = new Float32Array(
        data.buffer,
        data.byteOffset,
        data.byteLength / Float32Array.BYTES_PER_ELEMENT,
    );

    stream.acceptWaveform({
        sampleRate: recognizer.config.featConfig.sampleRate,
        samples: samples,
    });

    // Decode every frame currently available on the stream.
    while (recognizer.isReady(stream)) {
        recognizer.decode(stream);
    }

    const isEndpoint = recognizer.isEndpoint(stream);
    const text = recognizer.getResult(stream).text.toLowerCase();

    // Fresh, not-yet-emitted text: remember it and mark it pending.
    if (text.length > 0 && lastText !== text) {
        dispose = false;
        lastText = text;
    }
    if (isEndpoint) {
        if (text.length > 0) {
            lastText = text;
            segmentIndex += 1;
        }
        if (!dispose) {
            dispose = true;
            if (parentPort === null) {
                // Not running inside a worker thread: fall back to stdout.
                console.log('Segment', segmentIndex, ':', lastText);
            } else {
                parentPort.postMessage({ text: lastText, segmentIndex });
            }
        }
        // Clear recognizer state so the next segment starts fresh.
        recognizer.reset(stream);
    }
});

ai.start();