import gradio as gr
import librosa  # 导入librosa库用于音频处理
import numpy as np
from funasr import AutoModel
from pydub.exceptions import CouldntDecodeError

# Streaming chunk configuration for the Paraformer streaming model:
# [0, 10, 5] -> 600 ms decoding chunks; [0, 8, 4] would give 480 ms.
chunk_size = [0, 10, 5]  # [0, 10, 5] 600ms, [0, 8, 4] 480ms
encoder_chunk_look_back = 4  # number of chunks to lookback for encoder self-attention
decoder_chunk_look_back = 1  # number of encoder chunks to lookback for decoder cross-attention

# Streaming Chinese ASR model plus a separate punctuation-restoration model.
# NOTE(review): device is hard-coded to "cuda:0" — this fails on CPU-only
# hosts; confirm deployment targets have a GPU.
model = AutoModel(model="paraformer-zh-streaming", disable_update=True, device="cuda:0")
punc_model = AutoModel(model="ct-punc", disable_update=True, device="cuda:0")

last_text = ""  # global: the last successful recognition result, returned on errors

def transcribe(state, audio_chunk):
    """Streaming ASR callback for gradio's ``Audio.stream`` event.

    Args:
        state: np.ndarray of all 16 kHz float32 audio accumulated so far
            (or None / empty on the first call).
        audio_chunk: ``(sample_rate, samples)`` tuple as delivered by gradio,
            or None when no audio arrived.

    Returns:
        ``(state, text)``: the updated accumulated audio and the punctuated
        transcription of everything heard so far.  On any error the previous
        transcription (``last_text``) is returned unchanged.
    """
    global last_text
    # Fresh decoder caches on every call: the entire accumulated `state`
    # is re-decoded from scratch each time a new chunk arrives.
    cache = {}
    punc_cache = {}
    try:
        # Gradio may invoke the callback with no audio payload.
        if audio_chunk is None:
            return state, last_text
        sr, y = audio_chunk

        # Down-mix stereo to mono.
        if y.ndim > 1:
            y = y.mean(axis=1)
        y = y.astype(np.float32)

        # Empty or all-zero chunks would make the normalization below
        # divide by zero; keep the previous result instead.
        if len(y) == 0:
            return state, last_text
        peak = np.max(np.abs(y))
        if peak == 0:
            return state, last_text
        y /= peak  # peak-normalize to [-1, 1]

        # Resample to the 16 kHz rate the streaming model expects.
        speech = y
        if sr != 16000:
            speech = librosa.resample(y=speech, orig_sr=sr, target_sr=16000)

        # Accumulate all audio seen so far.  Bug fix: the original seeded the
        # buffer with `y` (raw sample rate) instead of the resampled `speech`,
        # which would mix sample rates in `state`.
        if state is not None and len(state) > 0:
            state = np.concatenate([state, speech])
        else:
            state = speech

        chunk_stride = chunk_size[1] * 960  # 10 * 60 ms * 16 kHz = 600 ms of samples
        valid_length = len(state) - encoder_chunk_look_back * chunk_stride
        total_chunk_num = max(1, int(valid_length // chunk_stride))
        text_output = ""

        for i in range(total_chunk_num):
            end_idx = min((i + 1) * chunk_stride, len(state))
            speech_chunk = state[i * chunk_stride:end_idx]
            is_final = i == total_chunk_num - 1
            res = model.generate(input=speech_chunk, cache=cache,
                                 is_final=is_final, chunk_size=chunk_size,
                                 encoder_chunk_look_back=encoder_chunk_look_back,
                                 decoder_chunk_look_back=decoder_chunk_look_back)
            text_output += res[0]['text']

        # Restore punctuation on the raw streaming transcript.  (The original
        # also assigned the unpunctuated text to last_text first — dead store.)
        last_text = punc_model.generate(input=text_output, cache=punc_cache)[0]['text']
        return state, last_text

    except CouldntDecodeError:
        # Undecodable audio: keep the last good transcription.
        return state, last_text
    except Exception as e:
        print(f"发生未知错误: {str(e)}")  # log and fall back to the last good result
        return state, last_text


# def transcribe(state, audio_chunk):
#     global cache
#     if audio_chunk is None:
#         return state, ""
#     try:
#         speech = audio_chunk[1]  # 提取音频数据
#         original_sr = audio_chunk[0]  # 获取原始采样率
#
#         # 检查音频数据长度
#         if len(speech) == 0:
#             return state, ""
#
#         # 将音频数据转换为浮点类型
#         speech = speech.astype(np.float32)
#
#         # 将音频从原始采样率转换为16kHz
#         if original_sr != 16000:
#             speech = librosa.resample(y=speech, orig_sr=original_sr, target_sr=16000)
#
#         # 修正 total_chunk_num 的计算
#         chunk_stride = chunk_size[1] * 960  # 600ms
#         total_chunk_num = int((len(speech) - 1) // chunk_stride + 1)
#         text_output = ""
#         for i in range(total_chunk_num):
#             speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
#             is_final = i == total_chunk_num - 1
#             res = model.generate(input=speech_chunk, cache=cache, is_final=is_final, chunk_size=chunk_size,
#                                  encoder_chunk_look_back=encoder_chunk_look_back,
#                                  decoder_chunk_look_back=decoder_chunk_look_back)
#             text_output += res[0]['text']
#
#         return state, text_output
#     except CouldntDecodeError:
#         return state, "音频解码失败，请检查音频文件是否损坏。"
#     except Exception as e:
#         return state, f"发生未知错误: {str(e)}"

# demo = gr.Interface(
#     transcribe,
#     ["state", gr.Audio(sources=["microphone"], streaming=True)],
#     ["state", "text"],
#     live=True,
# )

with gr.Blocks() as demo:
    # Accumulated 16 kHz audio shared across streaming callbacks.
    state = gr.State(np.array([]))
    audio = gr.Audio(sources=["microphone"], streaming=True)
    text = gr.Textbox()

    # Every microphone chunk feeds transcribe(); it returns the updated
    # accumulated audio and the current punctuated transcript.
    audio.stream(transcribe, [state, audio], [state, text])
    # Bug fix: two output components require a 2-tuple return value (the
    # original lambda returned a single string for [state, text]).  Resetting
    # state to an empty array lets the next recording start fresh.
    audio.stop_recording(lambda: (np.array([]), "stop"), None,
                         outputs=[state, text], queue=False)

if __name__ == "__main__":
    demo.launch()