import os
import uuid
import asyncio
from typing import Dict, List, Optional, Any
from pathlib import Path
import numpy as np
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess
from utils.log_util import logger


class FunASREngine:
    """
    FunASR 语音识别引擎封装类
    """
    
    def __init__(self):
        self.models: Dict[str, AutoModel] = {}
        self.model_configs = {
            # 非流式模型
            'sensevoice-small': {
                'model_name': 'iic/SenseVoiceSmall',
                'vad_model': 'fsmn-vad',
                'punc_model': None,
                'streaming': False
            },
            'paraformer-zh': {
                'model_name': 'iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
                'vad_model': 'damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
                'punc_model': 'damo/punc_ct-transformer_cn-en-common-vocab471067-large',
                'streaming': False
            },
            # 流式模型
            'paraformer-zh-streaming': {
                'model_name': 'iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
                'streaming': True
            }
        }
        
    async def init_model(self, model_name: str = 'sensevoice-small', device: str = 'cpu') -> bool:
        """
        初始化语音识别模型
        
        :param model_name: 模型名称
        :param device: 设备类型 (cpu/cuda)
        :return: 初始化是否成功
        """
        try:
            if model_name in self.models:
                logger.info(f"模型 {model_name} 已经初始化")
                return True
                
            if model_name not in self.model_configs:
                logger.error(f"不支持的模型: {model_name}")
                return False
                
            config = self.model_configs[model_name]
            logger.info(f"开始初始化模型: {model_name}")
            
            # 创建模型参数
            model_kwargs = {
                'model': config['model_name'],
                'device': device,
                'disable_update': True,  # 禁用自动更新
            }
            
            # 添加VAD模型
            if config.get('vad_model'):
                model_kwargs['vad_model'] = config['vad_model']
                model_kwargs['vad_kwargs'] = {"max_single_segment_time": 30000}
                
            # 添加标点模型
            if config.get('punc_model'):
                model_kwargs['punc_model'] = config['punc_model']
            
            # 在后台线程中初始化模型（FunASR不是异步的）
            loop = asyncio.get_event_loop()
            model = await loop.run_in_executor(None, lambda: AutoModel(**model_kwargs))
            
            self.models[model_name] = model
            logger.info(f"模型 {model_name} 初始化成功")
            return True
            
        except Exception as e:
            logger.error(f"模型 {model_name} 初始化失败: {str(e)}")
            return False
    
    async def recognize_file(self, 
                           file_path: str, 
                           model_name: str = 'sensevoice-small',
                           language: str = 'auto',
                           use_itn: bool = True,
                           use_vad: bool = True,
                           hotwords: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        识别音频文件
        
        :param file_path: 音频文件路径
        :param model_name: 使用的模型名称
        :param language: 语言类型
        :param use_itn: 是否使用逆文本归一化
        :param use_vad: 是否使用VAD
        :param hotwords: 热词列表
        :return: 识别结果
        """
        try:
            # 确保模型已初始化
            if model_name not in self.models:
                success = await self.init_model(model_name)
                if not success:
                    raise Exception(f"模型 {model_name} 初始化失败")
            
            model = self.models[model_name]
            
            # 准备识别参数
            generate_kwargs = {
                'input': file_path,
                'cache': {},
                'language': language,
                'use_itn': use_itn,
                'batch_size_s': 60,
            }
            
            # 添加热词
            if hotwords:
                generate_kwargs['hotword'] = ' '.join(hotwords)
            
            # 如果模型支持VAD
            if use_vad and self.model_configs[model_name].get('vad_model'):
                generate_kwargs['merge_vad'] = True
                generate_kwargs['merge_length_s'] = 15
            
            # 在后台线程中执行识别
            loop = asyncio.get_event_loop()
            result = await loop.run_in_executor(None, lambda: model.generate(**generate_kwargs))
            
            # 处理结果
            if result and len(result) > 0:
                text = result[0]['text']
                
                # 如果是SenseVoice模型，进行后处理
                if model_name == 'sensevoice-small':
                    text = rich_transcription_postprocess(text)
                
                # 计算置信度（简单估算）
                confidence = result[0].get('confidence', 0.95)
                
                return {
                    'text': text,
                    'confidence': confidence,
                    'raw_result': result[0],
                    'model_name': model_name,
                    'language': language
                }
            else:
                return {
                    'text': '',
                    'confidence': 0.0,
                    'raw_result': None,
                    'model_name': model_name,
                    'language': language
                }
                
        except Exception as e:
            logger.error(f"音频识别失败: {str(e)}")
            raise e
    
    async def recognize_streaming_chunk(self, 
                                      audio_chunk: bytes,
                                      session_id: str,
                                      model_name: str = 'paraformer-zh-streaming',
                                      is_final: bool = False,
                                      chunk_size: List[int] = [0, 10, 5]) -> Dict[str, Any]:
        """
        流式语音识别（处理音频块）
        
        :param audio_chunk: 音频数据块
        :param session_id: 会话ID
        :param model_name: 模型名称
        :param is_final: 是否为最后一块
        :param chunk_size: 块大小配置
        :return: 识别结果
        """
        try:
            # 确保模型已初始化
            if model_name not in self.models:
                success = await self.init_model(model_name)
                if not success:
                    raise Exception(f"模型 {model_name} 初始化失败")
            
            model = self.models[model_name]
            
            # 获取或创建会话缓存
            if not hasattr(self, 'session_caches'):
                self.session_caches = {}
            
            if session_id not in self.session_caches:
                self.session_caches[session_id] = {}
            
            cache = self.session_caches[session_id]
            
            # 将字节数据转换为numpy数组（假设是16kHz 16bit PCM）
            audio_data = np.frombuffer(audio_chunk, dtype=np.int16).astype(np.float32) / 32768.0
            
            # 准备识别参数
            generate_kwargs = {
                'input': audio_data,
                'cache': cache,
                'is_final': is_final,
                'chunk_size': chunk_size,
                'encoder_chunk_look_back': 4,
                'decoder_chunk_look_back': 1
            }
            
            # 在后台线程中执行识别
            loop = asyncio.get_event_loop()
            result = await loop.run_in_executor(None, lambda: model.generate(**generate_kwargs))
            
            # 处理结果
            if result and len(result) > 0:
                text = result[0]['text']
                confidence = result[0].get('confidence', 0.9)
                
                return {
                    'text': text,
                    'confidence': confidence,
                    'is_final': is_final,
                    'session_id': session_id,
                    'model_name': model_name
                }
            else:
                return {
                    'text': '',
                    'confidence': 0.0,
                    'is_final': is_final,
                    'session_id': session_id,
                    'model_name': model_name
                }
                
        except Exception as e:
            logger.error(f"流式识别失败: {str(e)}")
            raise e
    
    def clear_session_cache(self, session_id: str):
        """
        清除会话缓存
        
        :param session_id: 会话ID
        """
        if hasattr(self, 'session_caches') and session_id in self.session_caches:
            del self.session_caches[session_id]
            logger.info(f"已清除会话 {session_id} 的缓存")
    
    def get_supported_models(self) -> List[str]:
        """
        获取支持的模型列表
        
        :return: 模型名称列表
        """
        return list(self.model_configs.keys())
    
    def is_model_streaming(self, model_name: str) -> bool:
        """
        检查模型是否支持流式识别
        
        :param model_name: 模型名称
        :return: 是否支持流式
        """
        return self.model_configs.get(model_name, {}).get('streaming', False)


# Module-level singleton engine instance shared by the application.
funasr_engine = FunASREngine() 