"""
ASR服务，基于sherpa-onnx的VAD+非流式ASR，直接采集麦克风音频并分段识别。
参考eu03项目的成功实现
"""
import os
import time
import logging
import multiprocessing
from pathlib import Path
from typing import Optional, Union, Tuple, Callable
import numpy as np
import queue
import asyncio
import threading

import sounddevice as sd
import sherpa_onnx

# Module-level logging: configure the root handler once at import time and
# tag every record from this module with an [ASR_SERVICE] marker.
logger = logging.getLogger("ASR_SERVICE")
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - [ASR_SERVICE] - %(message)s',
    level=logging.INFO,
)

# Helper function for resampling
def resample_audio(audio_data: np.ndarray, original_sr: int, target_sr: int) -> np.ndarray:
    """Resample a 1-D audio signal to ``target_sr`` using linear interpolation.

    Args:
        audio_data: 1-D sample array; its dtype is preserved in the output.
        original_sr: sample rate of ``audio_data`` in Hz.
        target_sr: desired output sample rate in Hz.

    Returns:
        The resampled signal (the input object itself when the rates already
        match; an empty array when the target would contain zero samples).
    """
    # Fast path: nothing to do when the rates already agree.
    if original_sr == target_sr:
        return audio_data

    src_count = len(audio_data)
    total_seconds = src_count / original_sr
    dst_count = int(total_seconds * target_sr)

    # Guard: a clip shorter than one target sample yields an empty result.
    if dst_count == 0:
        return np.array([], dtype=audio_data.dtype)

    # Sample instants of the source and destination grids (both start at t=0,
    # neither includes the endpoint, matching a uniform sampling comb).
    src_times = np.linspace(0, total_seconds, src_count, endpoint=False)
    dst_times = np.linspace(0, total_seconds, dst_count, endpoint=False)

    # Linear interpolation onto the destination grid, restoring the input dtype.
    return np.interp(dst_times, src_times, audio_data).astype(audio_data.dtype)

class ASRService:
    """Speech-recognition service built on sherpa-onnx.

    Two modes of operation:
      * realtime: a background thread reads the microphone, segments speech
        with a silero VAD and decodes each segment with an offline Paraformer
        recognizer (see start_realtime_recognition / _main_loop);
      * one-shot: decode an uploaded audio file (see recognize_audio_file).
    """

    def __init__(self, config=None) -> None:
        """Create the service and eagerly load the VAD and ASR models.

        Args:
            config: optional configuration object; when present, fields under
                ``config.audio`` / ``config.vad`` / ``config.asr`` override
                the built-in defaults used by _init_models and
                _select_input_device.
        """
        self.config = config
        self._running = False
        self._recognizer = None
        self._vad = None
        # Sample rate both models expect; defaults to 16 kHz without a config.
        self._target_asr_sample_rate = config.audio.sample_rate if config else 16000
        self._window_size = None
        self._device_actual_sample_rate = self._target_asr_sample_rate
        self._input_device_name_for_log = "未知"

        # Realtime-recognition state (thread, stream, callback plumbing).
        self.is_realtime_running = False
        self.realtime_thread = None
        self.audio_stream = None
        self.callback = None
        self.event_loop = None

        # Load models up front so failures surface at construction time.
        self._init_models()

    def _init_models(self) -> None:
        """Load the silero VAD model and the offline Paraformer ASR model.

        Model paths, thread count, feature dimension and decoding method come
        from ``self.config`` when available, otherwise from hard-coded
        defaults.

        Raises:
            AssertionError: when a model file is missing.
                NOTE(review): ``assert`` is stripped under ``python -O`` —
                consider raising FileNotFoundError instead.
        """
        # --- VAD model ---
        vad_model_path = "models/vad/silero_vad.onnx"
        if self.config and hasattr(self.config, 'vad'):
            vad_model_path = self.config.vad.model_path

        # Resolve a relative path against the project root (two levels above
        # this file); absolute config paths are used as-is.
        model_path = Path(vad_model_path)
        if not model_path.is_absolute():
            model_path = Path(__file__).parent.parent / vad_model_path

        assert model_path.is_file(), f"VAD模型文件不存在: {model_path}"

        vad_config = sherpa_onnx.VadModelConfig()
        vad_config.silero_vad.model = str(model_path)
        vad_config.silero_vad.min_silence_duration = 0.25  # seconds of silence that closes a speech segment
        vad_config.sample_rate = self._target_asr_sample_rate
        # Samples the VAD consumes per call; _main_loop chunks its buffer by this.
        self._window_size = vad_config.silero_vad.window_size
        self._vad = sherpa_onnx.VoiceActivityDetector(vad_config, buffer_size_in_seconds=100)
        logger.info(f"VAD模型加载完成: {model_path} (期望输入采样率: {self._target_asr_sample_rate} Hz)")

        # --- ASR model (offline Paraformer) ---
        model_dir = "models/asr/sherpa-onnx-paraformer-zh-2024-03-09"
        tokens_file = "tokens.txt"
        model_file = "model.int8.ort"

        if self.config and hasattr(self.config, 'asr') and hasattr(self.config.asr, 'model'):
            model_dir = self.config.asr.model.model_dir
            tokens_file = self.config.asr.model.tokens_file
            model_file = self.config.asr.model.model_file

        # pathlib joining: a relative model_dir is anchored at the project
        # root; an absolute model_dir replaces the anchor entirely.
        asr_model_dir = Path(__file__).parent.parent / model_dir
        tokens = str(asr_model_dir / tokens_file)
        paraformer = str(asr_model_dir / model_file)

        assert Path(tokens).is_file(), f"ASR tokens文件不存在: {tokens}"
        assert Path(paraformer).is_file(), f"ASR paraformer模型不存在: {paraformer}"

        num_threads = 2
        feature_dim = 80
        decoding_method = "greedy_search"

        if self.config and hasattr(self.config, 'asr') and hasattr(self.config.asr, 'model'):
            num_threads = getattr(self.config.asr.model, 'num_threads', 2)
            feature_dim = getattr(self.config.asr.model, 'feature_dim', 80)
            decoding_method = getattr(self.config.asr.model, 'decoding_method', "greedy_search")

        self._recognizer = sherpa_onnx.OfflineRecognizer.from_paraformer(
            paraformer=paraformer,
            tokens=tokens,
            num_threads=num_threads,
            sample_rate=self._target_asr_sample_rate,
            feature_dim=feature_dim,
            decoding_method=decoding_method,
            debug=False
        )
        logger.info(f"ASR模型加载完成: {paraformer} (期望输入采样率: {self._target_asr_sample_rate} Hz)")

    def _select_input_device(self) -> Tuple[Optional[Union[int, str]], int, str]:
        """Pick the input device and the sample rate to open it at.

        Returns:
            ``(device, open_sample_rate, device_name)`` where ``device`` is
            None for the system default device (or the sentinel string
            "ERROR_DEFAULT_DEVICE_UNSUITABLE" on failure — _main_loop checks
            for "ERROR" in it), ``open_sample_rate`` is what to request from
            sounddevice, and ``device_name`` is for logging only.

        NOTE(review): when ``target_microphone_keyword`` IS configured, no
        device search happens — control skips the ``if not target_keyword``
        branch and falls through to the final default return.  This looks
        unimplemented; confirm against the eu03 reference implementation.
        """
        target_keyword = None
        if self.config and hasattr(self.config, 'asr') and hasattr(self.config.asr, 'target_microphone_keyword'):
            target_keyword = self.config.asr.target_microphone_keyword

        asr_target_sr = self._target_asr_sample_rate
        device_to_open_sr = asr_target_sr
        device_name_for_log = "默认设备"

        if not target_keyword:
            logger.info(f"配置中未指定 target_microphone_keyword，尝试使用默认输入设备。ASR目标采样率: {asr_target_sr}Hz。")
            try:
                # Preferred path: open the default device directly at the ASR rate.
                sd.check_input_settings(samplerate=asr_target_sr, channels=1)
                logger.info(f"默认输入设备原生支持 {asr_target_sr}Hz 和单通道。")
                device_to_open_sr = asr_target_sr
                default_device_info = sd.query_devices(kind='input')
                if default_device_info and isinstance(default_device_info, dict):
                    device_name_for_log = default_device_info.get('name', "默认设备")
                return None, device_to_open_sr, device_name_for_log
            except sd.PortAudioError:
                # Fallback: open at the device's own default rate and let the
                # capture loop resample to the ASR rate.
                logger.warning(f"默认输入设备不支持ASR目标采样率 {asr_target_sr}Hz。将尝试以其自身默认采样率打开并进行重采样。")
                try:
                    default_device_info = sd.query_devices(kind='input')
                    if default_device_info and isinstance(default_device_info, dict):
                         device_to_open_sr = int(default_device_info['default_samplerate'])
                         device_name_for_log = default_device_info.get('name', "默认设备")
                         sd.check_input_settings(samplerate=device_to_open_sr, channels=1, device=None)
                         logger.info(f"将以默认设备 ({device_name_for_log}) 的默认采样率 {device_to_open_sr}Hz 打开，并重采样到 {asr_target_sr}Hz。")
                         return None, device_to_open_sr, device_name_for_log
                    else:
                        raise sd.PortAudioError("无法获取默认输入设备信息。")
                except Exception as e_default_sr:
                    logger.error(f"尝试获取或检查默认设备默认采样率失败: {e_default_sr}。ASR服务无法启动。")
                    return "ERROR_DEFAULT_DEVICE_UNSUITABLE", asr_target_sr, "错误"

        return None, device_to_open_sr, device_name_for_log

    async def start_realtime_recognition(self, callback: Callable[[str], None]) -> None:
        """Start realtime microphone recognition in a background daemon thread.

        Args:
            callback: invoked with each recognized text segment; may be a
                plain function or a coroutine function (dispatched via
                _safe_callback on the loop captured here).
        """
        if self.is_realtime_running:
            logger.warning("实时识别已在运行")
            return

        # Capture the caller's event loop so the worker thread can schedule
        # callbacks onto it with run_coroutine_threadsafe.
        self.event_loop = asyncio.get_running_loop()
        self.callback = callback
        self.is_realtime_running = True

        # Daemon thread: does not block interpreter shutdown.
        self.realtime_thread = threading.Thread(target=self._main_loop)
        self.realtime_thread.daemon = True
        self.realtime_thread.start()

        logger.info("实时麦克风识别已启动")

    def stop_realtime_recognition(self) -> None:
        """Stop the capture loop, close the audio stream and join the thread."""
        if not self.is_realtime_running:
            return

        # Clearing _running makes _main_loop's while-loop exit on its own.
        self._running = False
        self.is_realtime_running = False

        # NOTE(review): _main_loop never assigns self.audio_stream (its stream
        # is a local inside a `with` block), so this branch appears dead.
        if self.audio_stream:
            try:
                self.audio_stream.stop()
                self.audio_stream.close()
            except:  # NOTE(review): bare except hides errors; prefer `except Exception`
                pass
            self.audio_stream = None

        if self.realtime_thread:
            # Bounded join so a stuck capture loop cannot hang shutdown.
            self.realtime_thread.join(timeout=2)
            self.realtime_thread = None

        logger.info("实时麦克风识别已停止")

    def _main_loop(self) -> None:
        """Capture/recognition loop run in the worker thread (eu03-style).

        Reads ~100 ms chunks from the microphone, resamples to the model rate
        when needed, feeds fixed-size windows to the VAD, and decodes every
        completed speech segment with the offline recognizer.  Results are
        handed back to the asyncio loop via run_coroutine_threadsafe.
        """
        self._running = True

        input_device_specifier, actual_sr_to_open, device_name = self._select_input_device()
        self._device_actual_sample_rate = actual_sr_to_open
        self._input_device_name_for_log = device_name

        # Sentinel check: device selection signals failure via an "ERROR..." string.
        if isinstance(input_device_specifier, str) and "ERROR" in input_device_specifier:
            logger.error(f"无法配置合适的输入设备 ({input_device_specifier})，ASR 服务无法启动。")
            return

        # Read roughly 100 ms of audio per iteration at the device's rate.
        samples_per_read = int(0.1 * self._device_actual_sample_rate)
        buffer = np.array([], dtype=np.float32)
        logger.info("ASR服务已启动，等待语音输入...")

        logger.info(f"尝试打开音频流，设备: {self._input_device_name_for_log}, "
                    f"请求打开采样率: {self._device_actual_sample_rate}Hz, "
                    f"ASR目标采样率: {self._target_asr_sample_rate}Hz")

        try:
             with sd.InputStream(
                 device=input_device_specifier,
                 channels=1,
                 dtype="float32",
                 samplerate=self._device_actual_sample_rate
             ) as s:
                logger.info(f"音频流成功打开 ({self._input_device_name_for_log} @ {self._device_actual_sample_rate}Hz)，进入主循环...")
                while self._running:
                    try:
                        samples, _ = s.read(samples_per_read)
                        samples = samples.reshape(-1)

                        # Resample when the device rate differs from the model rate.
                        if self._device_actual_sample_rate != self._target_asr_sample_rate:
                            samples = resample_audio(samples, self._device_actual_sample_rate, self._target_asr_sample_rate)

                        if len(samples) == 0:
                            continue

                        buffer = np.concatenate([buffer, samples])

                        # Feed the VAD in fixed-size windows; keep the remainder buffered.
                        while len(buffer) >= self._window_size:
                            self._vad.accept_waveform(buffer[:self._window_size])
                            buffer = buffer[self._window_size:]

                        # Decode every complete speech segment the VAD has queued.
                        while not self._vad.empty():
                            stream = self._recognizer.create_stream()
                            stream.accept_waveform(self._target_asr_sample_rate, self._vad.front.samples)
                            self._vad.pop()
                            self._recognizer.decode_stream(stream)
                            text = stream.result.text.strip()
                            if text and self.callback:
                                logger.info(f"ASR识别结果: {text}")
                                # Hand the result to the asyncio loop thread-safely.
                                if self.event_loop:
                                    asyncio.run_coroutine_threadsafe(
                                        self._safe_callback(text),
                                        self.event_loop
                                    )

                    except Exception as e:
                         # Keep the loop alive on transient errors; brief
                         # back-off avoids a tight error spin.
                         logger.error(f"ASR服务主循环内部异常: {e}", exc_info=True)
                         time.sleep(0.1)
                logger.info("ASR 服务主循环已终止。")

        except sd.PortAudioError as pae:
             logger.error(f"打开音频输入流失败: {pae}", exc_info=True)
        except Exception as e:
             logger.error(f"ASR服务主循环启动或流管理时发生意外异常: {e}", exc_info=True)

    async def _safe_callback(self, text: str) -> None:
        """Invoke the user callback on the event loop, supporting both plain
        functions and coroutine functions; exceptions are logged, not raised."""
        try:
            if self.callback:
                if asyncio.iscoroutinefunction(self.callback):
                    await self.callback(text)
                else:
                    self.callback(text)
        except Exception as e:
            logger.error(f"回调函数执行失败: {e}")

    def recognize_audio_file(self, audio_data: bytes) -> str:
        """Recognize speech in an in-memory audio file.

        Args:
            audio_data: raw bytes of an audio file (wav/mp3/... — format is
                auto-detected by _bytes_to_audio_array).

        Returns:
            The recognized text, or a Chinese status/error message string;
            this method never raises (errors are returned as text).
        """
        try:
            logger.info(f"开始处理音频文件，大小: {len(audio_data)} bytes")

            # Decode the container/bytes into a mono float32 sample array.
            audio_array, sample_rate = self._bytes_to_audio_array(audio_data)
            logger.info(f"音频数据转换完成，原始采样率: {sample_rate}, 时长: {len(audio_array)/sample_rate:.2f}秒")

            # Bring the audio to the sample rate the recognizer expects.
            if sample_rate != self._target_asr_sample_rate:
                logger.info(f"重采样音频: {sample_rate} -> {self._target_asr_sample_rate}")
                audio_array = resample_audio(audio_array, sample_rate, self._target_asr_sample_rate)

            # Run the offline recognizer.
            result = self._recognize_with_sherpa(audio_array)

            if not result or not result.strip():
                return "未识别到语音内容"

            return result

        except Exception as e:
            logger.error(f"音频文件识别失败: {e}", exc_info=True)
            return f"识别失败: {str(e)}"

    def _bytes_to_audio_array(self, audio_data: bytes) -> tuple:
        """Decode audio file bytes into ``(mono_float32_array, sample_rate)``.

        Tries soundfile first; on failure falls back to pydub, probing a list
        of common formats.  Output samples are normalized to [-1, 1].

        Raises:
            ValueError: when no decoder can handle the data.
        """
        import io
        import soundfile as sf
        from pydub import AudioSegment

        try:
            # Fast path: soundfile handles wav/flac/ogg natively.
            try:
                audio_io = io.BytesIO(audio_data)
                audio_array, sample_rate = sf.read(audio_io)
                logger.info("使用soundfile成功读取音频")
            except Exception as sf_error:
                logger.info(f"soundfile读取失败: {sf_error}，尝试使用pydub")

                # Fallback: pydub (ffmpeg-backed) for compressed formats.
                audio_io = io.BytesIO(audio_data)

                # Probe formats one by one until one decodes.
                audio_segment = None
                formats_to_try = ['mp3', 'm4a', 'ogg', 'aac', 'wav', 'flac']

                for fmt in formats_to_try:
                    try:
                        audio_io.seek(0)  # rewind before each probe
                        audio_segment = AudioSegment.from_file(audio_io, format=fmt)
                        logger.info(f"使用pydub成功读取音频，格式: {fmt}")
                        break
                    except Exception as fmt_error:
                        logger.debug(f"格式 {fmt} 读取失败: {fmt_error}")
                        continue

                if audio_segment is None:
                    raise ValueError("无法识别音频格式")

                # Pull the raw PCM and its layout out of the pydub segment.
                raw_data = audio_segment.raw_data
                sample_rate = audio_segment.frame_rate
                channels = audio_segment.channels
                sample_width = audio_segment.sample_width

                # Map PCM sample width (bytes) to the numpy dtype of the raw data.
                if sample_width == 1:
                    dtype = np.uint8
                elif sample_width == 2:
                    dtype = np.int16
                elif sample_width == 4:
                    dtype = np.int32
                else:
                    raise ValueError(f"不支持的采样宽度: {sample_width}")

                # Interpret the interleaved PCM bytes as a numpy array.
                audio_array = np.frombuffer(raw_data, dtype=dtype)

                # Downmix interleaved multi-channel audio to mono by averaging.
                if channels == 2:
                    audio_array = audio_array.reshape(-1, 2)
                    audio_array = np.mean(audio_array, axis=1)
                elif channels > 2:
                    audio_array = audio_array.reshape(-1, channels)
                    audio_array = np.mean(audio_array, axis=1)

                # Normalize integer PCM to float in [-1, 1] (uint8 is offset binary).
                if dtype == np.uint8:
                    audio_array = (audio_array.astype(np.float32) - 128) / 128
                elif dtype == np.int16:
                    audio_array = audio_array.astype(np.float32) / 32768
                elif dtype == np.int32:
                    audio_array = audio_array.astype(np.float32) / 2147483648

            # soundfile may return (frames, channels); collapse to mono too.
            if len(audio_array.shape) > 1:
                audio_array = np.mean(audio_array, axis=1)

            # The recognizer expects float32 samples.
            audio_array = audio_array.astype(np.float32)

            logger.info(f"音频转换成功: 采样率={sample_rate}, 时长={len(audio_array)/sample_rate:.2f}秒")
            return audio_array, sample_rate

        except Exception as e:
            error_msg = f"音频数据转换失败: {str(e)}"
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _recognize_with_sherpa(self, audio_array: np.ndarray) -> str:
        """Decode a mono float32 sample array with the offline recognizer.

        Args:
            audio_array: samples at ``self._target_asr_sample_rate``.

        Returns:
            The recognized text, or a Chinese status/error message string;
            never raises (errors are returned as text).
        """
        try:
            logger.info(f"开始sherpa识别，音频长度: {len(audio_array)}, 采样率: {self._target_asr_sample_rate}")

            # Defensive dtype conversion — sherpa expects float32 samples.
            if audio_array.dtype != np.float32:
                audio_array = audio_array.astype(np.float32)

            # One stream per utterance for the offline recognizer.
            stream = self._recognizer.create_stream()

            # Push the whole utterance at once.
            stream.accept_waveform(self._target_asr_sample_rate, audio_array)

            # Run decoding to completion.
            self._recognizer.decode_stream(stream)

            # Extract the decoded text, if any.
            result = stream.result

            if result and hasattr(result, 'text') and result.text:
                text = result.text.strip()
                logger.info(f"sherpa识别成功: {text}")
                return text
            else:
                logger.warning("sherpa识别结果为空")
                return "未识别到语音内容"

        except Exception as e:
            logger.error(f"sherpa识别失败: {e}", exc_info=True)
            return f"识别失败: {str(e)}"

    def cleanup(self) -> None:
        """Release resources: stops realtime recognition if it is running."""
        self.stop_realtime_recognition()
        logger.info("ASR服务已清理")