import gradio as gr
import numpy as np
from funasr import AutoModel
import librosa
# Load the FunASR models via AutoModel.
# Chinese ASR model (paraformer-zh) with FSMN VAD for segmenting long audio
# (max single segment 30 s); swap the model name for other languages/models.
model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", vad_kwargs={"max_single_segment_time": 30000}, device="cuda:0", disable_update=True)
# Punctuation-restoration model applied to the raw ASR transcript.
punc_model = AutoModel(model="ct-punc", disable_update=True, device="cuda:0")
#
# def transcribe(audio):
#     try:  # 添加异常捕获
#         if audio is None:
#             return ""
#         sr, y = audio
#
#         if y.ndim > 1:
#             y = y.mean(axis=1)
#
#         y = y.astype(np.float32)
#         y /= np.max(np.abs(y))
#         # 修复采样率转换逻辑
#         if sr != 16000:
#             y = librosa.resample(y=y, orig_sr=sr, target_sr=16000)  # 修改为动态采样率
#
#         result = model.generate(y, use_itn=True)
#         return result[0].get("text", "") if result else ""
#     except Exception as e:
#         print(f"Error processing audio: {str(e)}")
#         return "[识别错误] 请重试"
#
# demo = gr.Interface(
#     transcribe,
#     gr.Audio(sources="microphone", recording=True, type="filepath", format="mp3",min_length=3,max_length=20, streaming=True),
#     "text",
# )

def transcribe(state, audio_chunk):
    """Streaming callback: accumulate one microphone chunk into ``state``.

    No recognition happens here — text is produced only once recording
    stops (see ``stop_and_build``); this callback just normalizes,
    resamples, and buffers the incoming audio.

    Parameters
    ----------
    state : np.ndarray | None
        16 kHz audio accumulated so far (empty array / None on first call).
    audio_chunk : tuple[int, np.ndarray] | None
        ``(sample_rate, samples)`` pair from the streaming gr.Audio input.

    Returns
    -------
    tuple[np.ndarray | None, str]
        Updated accumulator and an empty text string.
    """
    try:
        if audio_chunk is None:
            return state, ""

        sr, y = audio_chunk

        # Down-mix multi-channel audio to mono.
        if y.ndim > 1:
            y = y.mean(axis=1)

        y = y.astype(np.float32)

        # Skip empty or silent chunks: there is nothing to accumulate, and
        # the peak normalization below would divide by zero.
        # (The original referenced an undefined global `last_text` here,
        # raising NameError on every silent chunk.)
        if y.size == 0:
            return state, ""
        peak = np.max(np.abs(y))
        if peak == 0:
            return state, ""

        # Peak-normalize to [-1, 1].
        y /= peak

        # The ASR model expects 16 kHz input; resample other rates.
        if sr != 16000:
            y = librosa.resample(y=y, orig_sr=sr, target_sr=16000)

        # Append to the running buffer. Treat an empty initial buffer the
        # same as None so the accumulator keeps float32 dtype instead of
        # being upcast by concatenation with the float64 gr.State default.
        if state is not None and len(state) > 0:
            state = np.concatenate([state, y])
        else:
            state = y
        return state, ""
    except Exception as e:
        print(f"处理错误: {str(e)}")
        return state, ""


def stop_and_build(state):
    """Run final recognition over the whole accumulated audio buffer.

    Triggered by ``stop_recording``: transcribes the buffer with the ASR
    model, restores punctuation, and resets the accumulator.

    Parameters
    ----------
    state : np.ndarray | None
        16 kHz audio accumulated by ``transcribe``.

    Returns
    -------
    tuple[np.ndarray, str]
        An empty buffer (resetting gr.State) and the recognized text.
    """
    # Fix: the original fell off the end here and returned None, which
    # breaks Gradio's unpacking into the two outputs [state, text].
    if state is None or len(state) == 0:
        return np.array([]), ""

    result = model.generate(state, use_itn=True)
    text_output = result[0]['text'] if result else ""
    # Restore punctuation; skip the call on an empty transcript.
    if text_output:
        text_output = punc_model.generate(text_output)[0]['text']
    return np.array([]), text_output  # clear state and return the result


with gr.Blocks() as demo:
    # Accumulator for the 16 kHz audio collected during one recording.
    state = gr.State(np.array([]))
    audio = gr.Audio(sources=["microphone"], streaming=True)
    text = gr.Textbox()

    # Each streamed chunk is appended to `state` by `transcribe`; the
    # textbox stays empty until recording stops.
    audio.stream(
        transcribe,
        [state, audio],
        [state, text],
        show_progress='minimal',
        trigger_mode="always_last"  # ensure one final trigger when the stream ends
    )
    # NOTE(review): start_recording() is registered without a callback —
    # presumably a no-op; confirm whether this call is needed at all.
    audio.start_recording()
    # On stop: transcribe the whole buffer, show the text, reset `state`.
    audio.stop_recording(stop_and_build, state,[state,text])
if __name__ == "__main__":
    demo.launch(
        show_error=True  # surface detailed errors in the UI
    )