from app.utils.logger import Logger
import tempfile
import os
import wave
from typing import Any, Optional

# Create the module-level logger
logger = Logger("asr_mode")

class ASRService:
    """Speech recognition service backed by a local FunASR SenseVoiceSmall model.

    Loads the model once at construction time; raw 16-bit mono PCM audio is
    written to a temporary WAV file and fed through the model for transcription.
    """

    def __init__(self):
        # Populated by initialize_model(); kept Optional so recognize_audio
        # can detect an uninitialized model defensively.
        self.model: Optional[Any] = None
        self.rich_transcription_postprocess: Optional[Any] = None
        self.initialize_model()

    def initialize_model(self) -> None:
        """Load the local SenseVoiceSmall model and its post-processing helper.

        Raises:
            FileNotFoundError: if the bundled model directory is missing.
            Exception: any FunASR load failure is logged and re-raised.
        """
        try:
            # Imported lazily so this module can be imported (e.g. for tooling)
            # even when funasr is not installed.
            from funasr import AutoModel
            from funasr.utils.postprocess_utils import rich_transcription_postprocess

            # Resolve the model path relative to this file: app/iic/SenseVoiceSmall
            current_dir = os.path.dirname(os.path.abspath(__file__))
            model_path = os.path.abspath(
                os.path.join(current_dir, "..", "iic", "SenseVoiceSmall")
            )

            # Fail fast with a clear message if the model was not bundled.
            if not os.path.exists(model_path):
                raise FileNotFoundError(f"模型目录不存在: {model_path}")

            # Load from the local path; fsmn-vad splits long audio into
            # segments of at most 30 s (max_single_segment_time is in ms).
            self.model = AutoModel(
                model=model_path,
                trust_remote_code=True,
                vad_model="fsmn-vad",
                vad_kwargs={"max_single_segment_time": 30000},
                device="cpu",
                disable_update=True,
            )
            self.rich_transcription_postprocess = rich_transcription_postprocess
            # NOTE: deliberately no success log here — the caller decides
            # when (and whether) to report successful initialization.
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            raise

    def save_audio_to_wav(self, audio_data: bytes, sample_rate: int = 16000) -> str:
        """Write raw PCM bytes to a temporary WAV file.

        Args:
            audio_data: raw PCM frames; assumed 16-bit little-endian mono
                (matches the channel/width headers written below).
            sample_rate: sampling rate in Hz (default 16 kHz).

        Returns:
            Path of the temporary ``.wav`` file. The caller is responsible
            for deleting it.

        Raises:
            Exception: any write failure is logged and re-raised.
        """
        try:
            # delete=False so the file survives after we close the handle;
            # the model reads it by path and the caller cleans it up.
            temp_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
            temp_filename = temp_file.name
            temp_file.close()

            with wave.open(temp_filename, 'wb') as wav_file:
                wav_file.setnchannels(1)   # mono
                wav_file.setsampwidth(2)   # 16-bit samples
                wav_file.setframerate(sample_rate)
                wav_file.writeframes(audio_data)

            return temp_filename
        except Exception as e:
            logger.error(f"保存音频文件失败: {e}")
            raise

    def recognize_audio(self, audio_data: bytes) -> str:
        """Transcribe raw PCM audio bytes to text.

        Args:
            audio_data: raw 16-bit mono PCM audio at 16 kHz.

        Returns:
            The recognized text ("" when the model returns no result), or a
            Chinese error sentinel string on failure — callers rely on the
            string sentinel rather than on exceptions.
        """
        # Defensive check: the constructor raises on load failure, but guard
        # against a partially-constructed instance anyway.
        if self.model is None or self.rich_transcription_postprocess is None:
            logger.error("语音识别模型未初始化")
            return "识别失败：模型未初始化"

        temp_audio_file: Optional[str] = None
        try:
            temp_audio_file = self.save_audio_to_wav(audio_data)

            res = self.model.generate(
                input=temp_audio_file,
                cache={},
                language="zh",
                use_itn=True,        # inverse text normalization (digits, punctuation)
                batch_size_s=60,
                merge_vad=True,      # merge VAD-split segments back together
                merge_length_s=15,
            )

            # FunASR returns a list of result dicts; take the first segment's text.
            if res and len(res) > 0:
                text = self.rich_transcription_postprocess(res[0]["text"])
            else:
                text = ""

            return text
        except Exception as e:
            logger.error(f"音频识别失败: {e}")
            return "识别失败"
        finally:
            # BUGFIX: the original deleted the temp WAV only on the success
            # path, leaking a file every time generate() raised. Always clean
            # up; ignore OSError in case the file was never fully created.
            if temp_audio_file is not None:
                try:
                    os.unlink(temp_audio_file)
                except OSError:
                    pass