import streamlit as st
import pyaudio
import wave
import os
from vosk import Model, KaldiRecognizer
import json

# Page header.
st.title("实时语音识别客户端")
st.write("点击下方按钮开始录音，录音结束后可以下载音频文件并查看识别结果。")

# Path to the Vosk acoustic/language model directory; bail out early with a
# user-visible error if it has not been downloaded.
model_path = "zhongwen"  # NOTE(review): assumes the model dir sits next to the script — confirm
if not os.path.exists(model_path):
    st.error(f"模型路径 {model_path} 不存在，请下载并放置正确路径")
    st.stop()


@st.cache_resource
def _load_vosk_model(path: str) -> Model:
    """Load the Vosk model once and cache it across Streamlit reruns.

    Streamlit re-executes this whole script on every widget interaction;
    without caching, the (large, slow-to-load) model would be re-read from
    disk on every button click.
    """
    return Model(path)


model = _load_vosk_model(model_path)

# PyAudio is intentionally NOT cached: `p.terminate()` at the bottom of the
# script releases it at the end of each run, so a fresh instance is needed
# on every rerun.
p = pyaudio.PyAudio()

# 录音状态和音频数据
# Seed the per-session state on first run: whether a recording is in
# progress, the captured PCM chunks, and the path of the last saved WAV.
_state_defaults = {"recording": False, "frames": [], "audio_file": None}
for _key, _default in _state_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default

# 开始录音按钮
# Recording parameters: 16 kHz mono, 16-bit PCM in 4000-frame chunks
# (matches the rate the Vosk recognizer is created with below).
SAMPLE_RATE = 16000
CHUNK_FRAMES = 4000
MAX_RECORD_SECONDS = 10  # upper bound on one recording session

# 开始录音按钮 — start-recording button.
#
# BUG FIX: the original version looped `while st.session_state.recording:`
# with no way out. Streamlit runs the script top-to-bottom on each
# interaction, so session_state cannot change *during* a run — the
# "结束录音" button could never break the loop and the app hung forever.
# The loop is now bounded by MAX_RECORD_SECONDS.
if st.button("开始录音"):
    if not st.session_state.recording:
        st.session_state.recording = True
        st.session_state.frames = []  # drop any previous take
        st.write("录音中...")

        # Open the microphone input stream.
        stream = p.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=SAMPLE_RATE,
            input=True,
            frames_per_buffer=CHUNK_FRAMES,
        )
        try:
            # Bounded capture loop; exception_on_overflow=False keeps a
            # transient buffer overrun from aborting the whole recording.
            num_chunks = int(SAMPLE_RATE / CHUNK_FRAMES * MAX_RECORD_SECONDS)
            for _ in range(num_chunks):
                data = stream.read(CHUNK_FRAMES, exception_on_overflow=False)
                st.session_state.frames.append(data)
        finally:
            # Always release the audio device, even if reading failed.
            stream.stop_stream()
            stream.close()

        st.session_state.recording = False
        st.write("录音结束。")

        # Persist the capture as a standard 16-bit mono WAV file.
        audio_file = "output.wav"
        with wave.open(audio_file, 'wb') as wf:
            wf.setnchannels(1)
            wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
            wf.setframerate(SAMPLE_RATE)
            wf.writeframes(b''.join(st.session_state.frames))
        st.session_state.audio_file = audio_file

# 结束录音按钮
# "结束录音" button: clear the in-progress flag if a recording was active.
if st.button("结束录音") and st.session_state.recording:
    st.session_state.recording = False
    st.write("录音已结束。")

# 下载录音文件
# Once a recording has been saved, offer the WAV file for download.
if st.session_state.audio_file:
    st.write("点击下方按钮下载录音文件：")
    with open(st.session_state.audio_file, "rb") as wav_fh:
        wav_bytes = wav_fh.read()
    st.download_button(
        label="下载录音",
        data=wav_bytes,
        file_name="recording.wav",
        mime="audio/wav",
    )

# 语音转文字功能
# 语音转文字 — transcribe the saved WAV with Vosk.
#
# BUG FIX: the original never called recognizer.FinalResult(), so whatever
# audio was still buffered inside the recognizer after the last accepted
# segment was silently dropped from the transcript.
if st.button("将录音转换为文字"):
    if st.session_state.audio_file:
        # 16000 must match the sample rate the WAV was recorded at.
        recognizer = KaldiRecognizer(model, 16000)
        segments = []

        # Feed the file to the recognizer in chunks; each time Vosk decides
        # an utterance is complete, collect its text.
        with wave.open(st.session_state.audio_file, 'rb') as wf:
            while True:
                data = wf.readframes(4000)
                if len(data) == 0:
                    break
                if recognizer.AcceptWaveform(data):
                    segments.append(json.loads(recognizer.Result()).get('text', ''))

        # Flush the recognizer so the trailing partial segment is included.
        segments.append(json.loads(recognizer.FinalResult()).get('text', ''))
        text_result = " ".join(seg for seg in segments if seg)

        st.write("识别结果：")
        st.write(text_result)
    else:
        st.write("请先录音并保存音频文件。")

# Release PortAudio resources at the end of this script run. Streamlit
# re-executes the whole script on every interaction, and `p` is recreated
# near the top each time, so terminating here avoids leaking native handles.
p.terminate()