import os

# HF_ENDPOINT must be set BEFORE importing transformers: huggingface_hub
# (pulled in by transformers) reads this variable at import time, so setting
# it after the import — as the original order did — silently has no effect
# and downloads still go to huggingface.co.
# NOTE(review): hf-mirror's docs use the value without a trailing slash;
# confirm the trailing "/" does not produce double slashes in request URLs.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com/"

import gradio as gr
import numpy as np
from transformers import pipeline

# Chinese-finetuned Whisper large-v3 checkpoint for speech recognition.
transcriber = pipeline("automatic-speech-recognition", model="BELLE-2/Belle-whisper-large-v3-zh")
# Alternative English-only baseline:
# transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")

def transcribe(stream, new_chunk):
    """Streaming ASR callback: accumulate audio chunks and return a transcript.

    Parameters
    ----------
    stream : np.ndarray | None
        Mono float32 audio accumulated across previous calls (None on the
        first call — Gradio's initial state).
    new_chunk : tuple[int, np.ndarray] | None
        ``(sample_rate, samples)`` from ``gr.Audio``; None when recording
        stops and an empty event is delivered.

    Returns
    -------
    tuple[np.ndarray | None, str]
        Updated stream state and the transcription of the full buffer so far.
    """
    # Recording stopped or an empty chunk arrived: keep state, emit no text.
    if new_chunk is None or new_chunk[1].size == 0:
        return stream, ""

    sr, y = new_chunk
    try:
        # Down-mix stereo (or multi-channel) to mono.
        if y.ndim > 1:
            y = y.mean(axis=1)

        y = y.astype(np.float32)
        if y.size == 0:  # guard against empty arrays before normalization
            return stream, ""

        # Peak-normalize to [-1, 1]. Skip silent chunks: the original
        # divided unconditionally, which yields NaNs (not an exception)
        # when the chunk is all zeros and poisons the whole buffer.
        peak = np.max(np.abs(y))
        if peak > 0:
            y /= peak
    except Exception as e:
        print(f"Audio processing error: {str(e)}")
        return stream, ""

    # Append the new chunk to the running buffer.
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y

    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]


# Live interface: microphone audio is streamed chunk-by-chunk into
# `transcribe`; the "state" slot carries the accumulated waveform between
# calls, and the "text" output shows the running transcript.
demo = gr.Interface(
    fn=transcribe,
    inputs=["state", gr.Audio(sources=["microphone"], streaming=True, type="numpy")],
    outputs=["state", "text"],
    live=True,
)

# show_error surfaces server-side exceptions in the browser UI.
demo.launch(show_error=True)