import logging
import time
from functools import lru_cache

from fastapi import HTTPException

from utils.funasr_client_api import Funasr_websocket_recognizer

from utils.Config import AppConfig

config = AppConfig()

def init_funasr_recognizer():
    """Create a fresh FunASR websocket recognizer from app config.

    All connection parameters (host, port, SSL flag, mode, chunk size)
    are read from the module-level AppConfig instance.
    """
    recognizer_kwargs = {
        'host': config.FUNASR_HOST,
        'port': config.FUNASR_PORT,
        'is_ssl': config.FUNASR_IS_SSL,
        'mode': config.FUNASR_MODE,
        'chunk_size': config.FUNASR_CHUNK_SIZE,
    }
    return Funasr_websocket_recognizer(**recognizer_kwargs)

def funasr_speech_to_text(audio_bytes: bytes, rcg) -> str:
    """Transcribe raw audio bytes to text via a FunASR websocket recognizer.

    The audio is fed to the recognizer in fixed-size chunks of
    ``config.AUDIO_CHUNK`` bytes; the last ``'2pass-offline'`` response
    seen carries the final transcript.

    NOTE(review): the previous ``@lru_cache(maxsize=128)`` decorator was
    removed — it memoized on ``(audio_bytes, rcg)`` (the comment said it
    was meant to cache the *model instance*), which fails for unhashable
    recognizers and pins large audio payloads in the cache.

    Args:
        audio_bytes: Raw audio payload to transcribe.
        rcg: An active recognizer exposing ``feed_chunk(data, wait_time)``
            (presumably a ``Funasr_websocket_recognizer`` — see
            ``init_funasr_recognizer``).

    Returns:
        The recognized, non-empty text.

    Raises:
        HTTPException: 400 when recognition yields no text;
            500 on any unexpected internal error.
    """
    start_time = time.time()
    text = ''
    try:
        # Ceiling division: number of AUDIO_CHUNK-sized slices.
        chunk_num = (len(audio_bytes) - 1) // config.AUDIO_CHUNK + 1
        for i in range(chunk_num):
            beg = i * config.AUDIO_CHUNK
            data = audio_bytes[beg: beg + config.AUDIO_CHUNK]
            resp = rcg.feed_chunk(data, wait_time=0.02)
            # Only the second-pass offline result is the final transcript.
            if len(resp) > 0 and resp['mode'] == '2pass-offline':
                text = resp['text']
        if not text:
            logging.warning("语音识别未获取到有效文本内容，可能是音频数据问题或识别条件未满足。")
            raise HTTPException(status_code=400, detail="语音识别失败，可能是音频数据质量不佳或不符合要求，请重新上传音频。")
        return text
    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 400 above) propagate
        # unchanged instead of being re-wrapped as a 500 below.
        raise
    except Exception as e:
        logging.error("语音识别出现其他错误: %s", e)
        raise HTTPException(status_code=500, detail="语音识别出现内部错误，请联系管理员。")
    finally:
        # Timing log only — deliberately no `return` here: a return inside
        # `finally` would swallow every exception raised above (the
        # original bug that made failures come back as an empty string).
        end_time = time.time()
        logging.info("语音识别耗时: %s 秒，识别结果: %s", end_time - start_time, text)