import pyaudio
import vosk
import json


def vosk_speech_to_text_demo(
    model_path="D:/modelscope/vosk-model-small-cn-0.22",
    sample_rate=16000,
    chunk_frames=4096,
):
    """Continuously transcribe microphone audio to text until Ctrl+C.

    Opens the default input device via PyAudio, feeds 16-bit mono PCM to a
    Vosk recognizer, and prints each finalized utterance.

    Args:
        model_path: Path to an unpacked Vosk acoustic model directory.
        sample_rate: Capture/recognition sample rate in Hz (must match the
            model's expected rate; the small-cn model uses 16000).
        chunk_frames: Frames read from the stream per loop iteration.

    Raises:
        Exception: Vosk raises if ``model_path`` does not contain a valid model.
    """
    # Load the Vosk model and build a recognizer at the capture sample rate.
    model = vosk.Model(model_path)
    recognizer = vosk.KaldiRecognizer(model, sample_rate)
    # Include per-word details (timing/confidence) in the JSON results.
    recognizer.SetWords(True)

    p = pyaudio.PyAudio()
    # 16-bit mono input stream; buffer is 2x the read size to absorb jitter.
    stream = p.open(
        format=pyaudio.paInt16,
        channels=1,
        rate=sample_rate,
        input=True,
        frames_per_buffer=chunk_frames * 2,
    )
    stream.start_stream()

    print("开始语音识别，按Ctrl+C停止...")
    try:
        while True:
            # exception_on_overflow=False: drop overflowed audio instead of
            # raising OSError when recognition briefly lags the microphone.
            data = stream.read(chunk_frames, exception_on_overflow=False)
            if recognizer.AcceptWaveform(data):
                # A full utterance was finalized; print its text.
                result = json.loads(recognizer.Result())
                print(result.get('text', ''))
    except KeyboardInterrupt:
        # Flush any buffered partial utterance so the last phrase isn't lost.
        final = json.loads(recognizer.FinalResult())
        if final.get('text'):
            print(final['text'])
        print("\n识别已停止")
    finally:
        # Always release the audio device, even on unexpected errors.
        stream.stop_stream()
        stream.close()
        p.terminate()


# Run the live-microphone demo only when executed as a script, not on import.
if __name__ == "__main__":
    vosk_speech_to_text_demo()