from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from funasr import AutoModel
import numpy as np
import os
import json
import logging

# Keep ModelScope model downloads next to the application instead of the
# user-global cache directory.
os.environ["MODELSCOPE_CACHE"] = "./"

router = APIRouter(prefix="/api")

# Streaming-model configuration (see FunASR paraformer-zh-streaming docs).
chunk_size = [0, 10, 5]  # [0, 10, 5] => 600 ms streaming chunks
encoder_chunk_look_back = 4  # number of past encoder chunks attended to
decoder_chunk_look_back = 1  # number of past decoder chunks attended to

# NOTE: model is loaded eagerly at import time; the first import of this
# module blocks until the download/initialization completes.
print("正在加载模型...")
model = AutoModel(model="paraformer-zh-streaming", disable_update=True)
print("模型加载完成！")


@router.websocket("/ws/asr")
async def websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint: receive raw Float32 PCM audio and stream back ASR text.

    Protocol (server -> client), JSON text frames:
        {"type": "result", "text": str, "is_final": bool} -- recognized text
        {"type": "error",  "text": str}                   -- recognition error

    The client sends binary frames containing Float32Array samples
    (assumed 16 kHz mono -- TODO confirm against the frontend capture code).
    Once accumulated silence exceeds ``silence_limit`` seconds, the chunk is
    submitted with ``is_final=True`` and the streaming cache is reset so the
    next utterance starts fresh.
    """
    await websocket.accept()
    logging.info("ASR连接建立")
    cache = {}  # streaming decoder state carried across generate() calls
    chunk_stride = chunk_size[1] * 960  # 9600 samples = 600ms at 16 kHz

    # Silence-detection parameters.
    sample_rate = 16000
    silence_threshold = 0.01  # RMS below this value counts as silence
    silence_duration = 0.0    # accumulated consecutive silence, in seconds
    silence_limit = 1.0       # silence beyond this forces a final result

    try:
        while True:
            # Receive one binary frame of Float32Array samples.
            data = await websocket.receive_bytes()
            audio_chunk = np.frombuffer(data, dtype=np.float32)

            if len(audio_chunk) == 0:
                continue

            # Fix: measure duration and energy on the samples actually
            # received, BEFORE zero-padding. Previously the duration was a
            # hard-coded 8192/16000 s regardless of real chunk length, and
            # padding with zeros diluted the RMS of short chunks.
            chunk_duration = len(audio_chunk) / sample_rate
            audio_energy = np.sqrt(np.mean(audio_chunk ** 2))

            # Pad short chunks up to the stride the model expects.
            if len(audio_chunk) < chunk_stride:
                audio_chunk = np.pad(
                    audio_chunk, (0, chunk_stride - len(audio_chunk)))

            if audio_energy < silence_threshold:
                silence_duration += chunk_duration
            else:
                silence_duration = 0.0

            # Force a final result after sustained silence.
            is_final = silence_duration >= silence_limit

            try:
                res = model.generate(
                    input=audio_chunk,
                    cache=cache,
                    is_final=is_final,
                    chunk_size=chunk_size,
                    encoder_chunk_look_back=encoder_chunk_look_back,
                    decoder_chunk_look_back=decoder_chunk_look_back
                )

                if res and len(res) > 0:
                    text = res[0].get("text", "")
                    # Skip empty output and the model's common filler token.
                    if text and text != "嗯":
                        await websocket.send_text(json.dumps({
                            'type': 'result',
                            'text': text,
                            'is_final': is_final
                        }))

                # After a final result, reset state for the next utterance.
                # (The old "and silence_duration >= silence_limit" clause was
                # redundant: is_final already encodes it.)
                if is_final:
                    cache = {}
                    silence_duration = 0.0

            except Exception as e:
                # Report the recognition error to the client but keep the
                # connection alive for subsequent chunks.
                await websocket.send_text(json.dumps({
                    'type': 'error',
                    'text': f'ASR服务器错误: {str(e)}'
                }))

    except WebSocketDisconnect:
        # Normal client hang-up; nothing to clean up beyond local state.
        pass
    except Exception as e:
        # Best-effort notification: the socket may already be unusable,
        # so swallow only send failures (was a bare except).
        try:
            await websocket.send_json({"error": str(e), "type": "error"})
        except Exception:
            pass
