import os
import sherpa_onnx
import time

# ALSA capture device string (card 1, device 0) — adjust to match the
# output of `arecord -l` on the target machine.
device_name = 'plughw:1,0,0'
print(f"device_name: {device_name}")
alsa = sherpa_onnx.Alsa(device_name)

sample_rate = 16000
samples_per_read = int(0.1 * sample_rate)  # 0.1 second = 100 ms

# Silero VAD configuration. The duration fields are in seconds;
# presumably they bound the length of a speech segment (0.6 s – 10 s)
# and the silence needed to close one (1 s) — confirm against the
# sherpa-onnx VadModelConfig docs.
config = sherpa_onnx.VadModelConfig()
config.silero_vad.model = "./models/vad/silero_vad.onnx"
config.silero_vad.max_speech_duration = 10.0
config.silero_vad.min_speech_duration = 0.6
config.silero_vad.min_silence_duration = 1.0
config.sample_rate = sample_rate


# VAD with a 30-second internal audio buffer.
vad = sherpa_onnx.VoiceActivityDetector(config, buffer_size_in_seconds=30)

# Offline Paraformer recognizer (Chinese small model, int8-quantized)
# used to decode each speech segment the VAD emits.
recognizer = sherpa_onnx.OfflineRecognizer.from_paraformer(
        tokens="./models/asr/sherpa-onnx-paraformer-zh-small-2024-03-09/tokens.txt",
        paraformer='./models/asr/sherpa-onnx-paraformer-zh-small-2024-03-09/model.int8.onnx')



def asrReadText():
    """Capture audio until speech is detected, then transcribe and return it.

    Blockingly reads 100 ms chunks from the module-level ``alsa`` device and
    feeds them to the module-level ``vad``. Once the VAD has produced one or
    more complete speech segments, each segment is decoded with the
    module-level ``recognizer`` and the function returns.

    Returns:
        str: decoded text, one VAD segment per line, surrounding whitespace
        stripped.
    """
    asr_result = ''
    printed = False  # suppress repeated "Detected speech" prints per utterance

    while True:
        # Blocking read of one 100 ms chunk from the microphone.
        chunk = alsa.read(samples_per_read)
        vad.accept_waveform(chunk)

        # Evaluate the detector once per chunk instead of twice.
        if vad.is_speech_detected():
            if not printed:
                print("[asr] Detected speech")
                printed = True
        else:
            printed = False

        # Drain every completed speech segment queued by the VAD.
        # (Renamed from `samples` to avoid shadowing the mic chunk.)
        while not vad.empty():
            segment = vad.front.samples
            duration = len(segment) / sample_rate
            print(f"[asr] Duration: {duration:.3f} seconds")
            stream = recognizer.create_stream()
            stream.accept_waveform(sample_rate, segment)
            recognizer.decode_stream(stream)
            result = stream.result.text
            print("[asr] ASR result:", result)
            asr_result += result + '\n'
            vad.pop()

        # Return as soon as at least one segment has been decoded; otherwise
        # keep listening. (Replaces the original redundant emptiness check
        # inside the drain loop and the unreachable trailing return.)
        if asr_result:
            return asr_result.strip()