import streamlit as st
import pyaudio
import wave
import os
from vosk import Model, KaldiRecognizer
import json

# Load the Vosk acoustic model from disk; abort the app if it is missing.
model_path = "zhongwen"  # directory holding the downloaded Vosk model files
model_available = os.path.exists(model_path)

if not model_available:
    # Surface the problem in the UI and halt this script run.
    st.error(f"模型路径 {model_path} 不存在，请下载并放置正确路径")
    st.stop()

model = Model(model_path)

# Set up the audio-capture backend (PortAudio via PyAudio).
p = pyaudio.PyAudio()

# Page chrome for the Streamlit front end.
st.title("实时语音识别客户端")
st.write("点击下方按钮开始录音，录音结束后会自动转换为文字。")

# Persist the recording flag across Streamlit reruns; default to "not recording".
st.session_state.setdefault('recording', False)

# --- Start-recording button -------------------------------------------------
# NOTE(review): Streamlit reruns this script top-to-bottom on every widget
# interaction. The original unbounded `while st.session_state.recording` loop
# could therefore never be stopped: the "结束录音" button cannot flip the flag
# while this run is still blocked inside the loop, freezing the app forever.
# The capture loop is now bounded by MAX_RECORD_SECONDS as a hard safety cap
# (it still exits early if the flag is cleared).
MAX_RECORD_SECONDS = 10   # hard upper bound on one capture session
SAMPLE_RATE = 16000       # Vosk models expect 16 kHz mono 16-bit PCM
CHUNK_FRAMES = 4000       # frames per read (0.25 s at 16 kHz)

if st.button("开始录音"):
    if not st.session_state.recording:
        st.session_state.recording = True
        st.write("录音中...")

        # Open a 16-bit mono input stream at the rate the model expects.
        stream = p.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=SAMPLE_RATE,
            input=True,
            frames_per_buffer=CHUNK_FRAMES,
        )

        recognizer = KaldiRecognizer(model, SAMPLE_RATE)

        try:
            # Bounded capture loop: stops at the time cap even if the
            # session-state flag is never cleared mid-run.
            max_chunks = (MAX_RECORD_SECONDS * SAMPLE_RATE) // CHUNK_FRAMES
            for _ in range(max_chunks):
                if not st.session_state.recording:
                    break
                # exception_on_overflow=False: drop late buffers instead of
                # raising IOError when the UI thread falls behind the mic.
                data = stream.read(CHUNK_FRAMES, exception_on_overflow=False)
                if recognizer.AcceptWaveform(data):
                    result_text = json.loads(recognizer.Result())['text']
                    st.write(f"识别结果: {result_text}")

            # Flush audio still buffered inside the recognizer — without this
            # the trailing fragment of speech was silently discarded.
            final_text = json.loads(recognizer.FinalResult())['text']
            if final_text:
                st.write(f"识别结果: {final_text}")
        finally:
            # Always release the audio device, even if recognition raised.
            stream.stop_stream()
            stream.close()
            st.session_state.recording = False

        st.write("录音结束。")

# --- Stop-recording button --------------------------------------------------
# Clears the recording flag so the next capture loop iteration can observe it.
if st.button("结束录音") and st.session_state.recording:
    st.session_state.recording = False
    st.write("录音已结束。")

# Shut down PyAudio and release PortAudio resources.
# NOTE(review): this line executes on every Streamlit rerun (the whole script
# re-executes per interaction), so `p` is re-created at the top and terminated
# here each pass — confirm this is the intended lifecycle.
p.terminate()