"""实时语音识别（ASR）模块
基于阿里云DashScope的Qwen-ASR-Realtime模型
"""
import os
import json
import logging
from typing import Callable, Optional
import dashscope
from dashscope.audio.qwen_omni import (
    OmniRealtimeConversation,
    OmniRealtimeCallback,
    MultiModality,
    AudioFormat
)

logger = logging.getLogger(__name__)


class RealtimeASR:
    """Realtime speech-recognition (ASR) client.

    Drives Alibaba Cloud DashScope's Qwen-ASR-Realtime model
    (``qwen3-asr-flash-realtime``) over a websocket session and reports
    recognition results through user-supplied callbacks.
    """

    class _TranscriptionParams:
        """Attribute bag passed to ``update_session`` as ``transcription_params``.

        Defined locally because importing a TranscriptionParams class from the
        SDK fails; the SDK only reads attributes off this object, so a plain
        holder class is sufficient.
        """

        def __init__(
            self,
            language: str = 'zh',
            sample_rate: int = 16000,
            input_audio_format: str = 'pcm',
            corpus_text: Optional[str] = None,
        ):
            self.language = language
            self.sample_rate = sample_rate
            self.input_audio_format = input_audio_format
            self.corpus_text = corpus_text
            # Mirror under `corpus` as well — the SDK may look up this
            # attribute name (NOTE(review): confirm against the installed
            # SDK version).
            self.corpus = corpus_text

    def __init__(
        self,
        api_key: Optional[str] = None,
        language: str = 'zh',
        sample_rate: int = 16000,
        input_audio_format: str = 'pcm',
        corpus_text: Optional[str] = None,
        enable_turn_detection: bool = True,
        turn_detection_threshold: float = 0.2,
        turn_detection_silence_duration_ms: int = 800,
        region: str = 'beijing'
    ):
        """Initialize the ASR client.

        Args:
            api_key: DashScope API key; falls back to the DASHSCOPE_API_KEY
                environment variable when None.
            language: Source audio language, default 'zh' (Chinese).
            sample_rate: Audio sample rate; 16000 or 8000, default 16000.
            input_audio_format: Audio format: pcm, opus or wav, default pcm.
            corpus_text: Optional context text used to bias recognition.
            enable_turn_detection: Enable server-side VAD, default True.
            turn_detection_threshold: VAD detection threshold, default 0.2.
            turn_detection_silence_duration_ms: VAD end-of-turn silence
                threshold in milliseconds, default 800.
            region: Service region, 'beijing' or 'singapore'.

        Raises:
            ValueError: If no API key is given and DASHSCOPE_API_KEY is unset.
        """
        # Resolve the API key: explicit argument wins over the environment.
        if api_key:
            dashscope.api_key = api_key
        elif 'DASHSCOPE_API_KEY' in os.environ:
            dashscope.api_key = os.environ['DASHSCOPE_API_KEY']
        else:
            raise ValueError('未设置DASHSCOPE_API_KEY')

        # Pick the websocket endpoint for the requested region.
        if region == 'singapore':
            self.url = 'wss://dashscope-intl.aliyuncs.com/api-ws/v1/realtime'
        else:
            self.url = 'wss://dashscope.aliyuncs.com/api-ws/v1/realtime'

        self.language = language
        self.sample_rate = sample_rate
        self.input_audio_format = input_audio_format
        self.corpus_text = corpus_text
        self.enable_turn_detection = enable_turn_detection
        self.turn_detection_threshold = turn_detection_threshold
        self.turn_detection_silence_duration_ms = turn_detection_silence_duration_ms

        # Live session state, populated by connect().
        self.conversation = None
        self.is_connected = False

        # User callbacks, installed by connect().
        self.on_transcription_callback: Optional[Callable[[str, bool], None]] = None
        self.on_speech_start_callback: Optional[Callable[[], None]] = None
        self.on_speech_stop_callback: Optional[Callable[[], None]] = None
        self.on_error_callback: Optional[Callable[[str], None]] = None

    def connect(
        self,
        on_transcription: Callable[[str, bool], None],
        on_speech_start: Optional[Callable[[], None]] = None,
        on_speech_stop: Optional[Callable[[], None]] = None,
        on_error: Optional[Callable[[str], None]] = None
    ):
        """Open the ASR websocket session and configure it.

        Args:
            on_transcription: Called with (text, is_final) for each result.
            on_speech_start: Called when server VAD detects speech start.
            on_speech_stop: Called when server VAD detects speech stop.
            on_error: Called with an error message on failure.

        Raises:
            Exception: Re-raises whatever the SDK raised when connecting or
                configuring the session (after invoking ``on_error``).
        """
        self.on_transcription_callback = on_transcription
        self.on_speech_start_callback = on_speech_start
        self.on_speech_stop_callback = on_speech_stop
        self.on_error_callback = on_error

        callback_handler = ASRCallbackHandler(
            on_transcription=on_transcription,
            on_speech_start=on_speech_start,
            on_speech_stop=on_speech_stop,
            on_error=on_error
        )

        try:
            self.conversation = OmniRealtimeConversation(
                model='qwen3-asr-flash-realtime',
                callback=callback_handler,
                url=self.url
            )
            self.conversation.connect()

            # Text-only output: we only want transcription, no audio reply.
            # update_session's own outer input_audio_format argument is left
            # at its default on purpose; the per-transcription audio format
            # travels inside transcription_params.
            self.conversation.update_session(
                output_modalities=[MultiModality.TEXT],
                enable_turn_detection=self.enable_turn_detection,
                turn_detection_type='server_vad',
                turn_detection_threshold=self.turn_detection_threshold,
                turn_detection_silence_duration_ms=self.turn_detection_silence_duration_ms,
                transcription_params=self._TranscriptionParams(
                    language=self.language,
                    sample_rate=self.sample_rate,
                    input_audio_format=self.input_audio_format,
                    corpus_text=self.corpus_text
                )
            )

            logger.info(f"ASR会话已配置: language={self.language}, format={self.input_audio_format}")

            self.is_connected = True
            logger.info("ASR连接已建立")

        except Exception as e:
            logger.error(f"ASR连接失败: {e}")
            # Best-effort cleanup so a half-open websocket does not leak
            # when update_session fails after connect() succeeded.
            if self.conversation is not None:
                try:
                    self.conversation.close()
                except Exception:
                    pass
                self.conversation = None
            if on_error:
                on_error(f"连接失败: {str(e)}")
            raise

    def append_audio(self, audio_b64: str) -> None:
        """Send a chunk of base64-encoded audio to the session.

        Args:
            audio_b64: Base64-encoded audio data in the configured format.
        """
        if not self.is_connected or not self.conversation:
            logger.warning("ASR未连接，无法发送音频")
            return

        try:
            self.conversation.append_audio(audio_b64)
        except Exception as e:
            logger.error(f"发送音频失败: {e}")
            if self.on_error_callback:
                self.on_error_callback(f"发送音频失败: {str(e)}")

    def commit(self) -> None:
        """Commit the audio buffer manually (only needed when VAD is disabled)."""
        if not self.is_connected or not self.conversation:
            logger.warning("ASR未连接，无法提交")
            return

        try:
            self.conversation.commit()
        except Exception as e:
            logger.error(f"提交音频失败: {e}")
            if self.on_error_callback:
                self.on_error_callback(f"提交音频失败: {str(e)}")

    def close(self) -> None:
        """Close the ASR connection and reset session state."""
        if self.conversation:
            try:
                self.conversation.close()
                logger.info("ASR连接已关闭")
            except Exception as e:
                logger.error(f"关闭ASR连接失败: {e}")
            finally:
                # Always drop the session reference, even if close() raised.
                self.is_connected = False
                self.conversation = None


class ASRCallbackHandler(OmniRealtimeCallback):
    """Dispatches DashScope realtime events to plain Python callbacks."""

    def __init__(
        self,
        on_transcription: Callable[[str, bool], None],
        on_speech_start: Optional[Callable[[], None]] = None,
        on_speech_stop: Optional[Callable[[], None]] = None,
        on_error: Optional[Callable[[str], None]] = None
    ):
        self.on_transcription = on_transcription
        self.on_speech_start = on_speech_start
        self.on_speech_stop = on_speech_stop
        self.on_error = on_error

    def on_open(self):
        """Websocket connection established successfully."""
        logger.info("ASR WebSocket连接已建立")

    def on_event(self, message):
        """Handle one server-side event (a JSON string or an already-parsed dict)."""
        try:
            # Normalize the payload into a dict before dispatching.
            if isinstance(message, dict):
                event = message
            elif isinstance(message, str):
                event = json.loads(message)
            else:
                logger.warning(f"未知的消息类型: {type(message)}")
                return

            event_type = event.get('type', '')

            if event_type == 'session.created':
                session_id = event.get('session', {}).get('id', '')
                logger.info(f"ASR会话已创建: {session_id}")
                return

            if event_type == 'session.updated':
                logger.info("ASR会话配置已更新")
                return

            if event_type == 'input_audio_buffer.speech_started':
                logger.info("检测到语音开始")
                if self.on_speech_start:
                    self.on_speech_start()
                return

            if event_type == 'input_audio_buffer.speech_stopped':
                logger.info("检测到语音停止")
                if self.on_speech_stop:
                    self.on_speech_stop()
                return

            if event_type == 'conversation.item.input_audio_transcription.text':
                # Partial (streaming) recognition result.
                partial = event.get('text', '')
                if partial and self.on_transcription:
                    self.on_transcription(partial, False)
                return

            if event_type == 'conversation.item.input_audio_transcription.completed':
                # Final recognition result for the utterance.
                final_text = event.get('transcript', '')
                if final_text and self.on_transcription:
                    self.on_transcription(final_text, True)
                return

            if event_type == 'error':
                error_msg = event.get('error', {}).get('message', '未知错误')
                logger.error(f"ASR错误: {error_msg}")
                if self.on_error:
                    self.on_error(error_msg)

        except json.JSONDecodeError as e:
            logger.error(f"解析ASR事件失败: {e}")
        except Exception as e:
            logger.error(f"处理ASR事件失败: {e}")

    def on_close(self, close_status_code, close_msg):
        """Websocket connection closed."""
        logger.info(f"ASR WebSocket连接已关闭: {close_status_code} - {close_msg}")
