"""
流式TTS处理工具模块
提供文本断句、流式TTS等功能
"""

import asyncio
import logging
import os
import re
import time
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, List, Optional

logger = logging.getLogger(__name__)

def split_text_by_punctuation(text: str) -> List[str]:
    """
    Split text into sentences at punctuation marks.

    Sentences shorter than the configured minimum length are merged with
    the following sentence when the merged result stays within twice the
    minimum; fragments shorter than 2 characters are dropped.

    Args:
        text: Input text to split.

    Returns:
        List of sentence strings (possibly merged), in original order.
    """
    if not text or not text.strip():
        return []

    # Pre-filter special characters so TTS receives clean text.
    try:
        from core.tts.filter import filter_special_chars
        text = filter_special_chars(text)
    except ImportError:
        pass  # Filter unavailable: continue with the raw text.

    # Sentence-ending punctuation (CJK and ASCII). Set membership replaces
    # the original per-character regex match — same characters, O(1) lookup.
    sentence_endings = frozenset('。！？.!?；;')
    sentences = []
    current_sentence = ""

    for char in text:
        current_sentence += char

        if char in sentence_endings:
            sentence = current_sentence.strip()
            if sentence:
                sentences.append(sentence)
            current_sentence = ""

    # Keep trailing text that had no terminating punctuation.
    if current_sentence.strip():
        sentences.append(current_sentence.strip())

    # Merge short sentences so the output is not overly fragmented.
    from config import Config
    filtered_sentences = []
    min_sentence_chars = Config.INDEX_TTS_STREAMING_MIN_SENTENCE_CHARS

    i = 0
    while i < len(sentences):
        sentence = sentences[i].strip()
        if len(sentence) < min_sentence_chars and i + 1 < len(sentences):
            next_sentence = sentences[i + 1].strip()
            merged_sentence = sentence + next_sentence
            if len(merged_sentence) <= min_sentence_chars * 2:
                # Short sentence + successor fit the budget: merge them.
                filtered_sentences.append(merged_sentence)
                i += 2
            else:
                # Merged result too long: keep the short sentence alone,
                # unless it is a trivial 1-character fragment.
                if len(sentence) >= 2:
                    filtered_sentences.append(sentence)
                i += 1
        else:
            if len(sentence) >= 2:
                filtered_sentences.append(sentence)
            i += 1

    return filtered_sentences

# Global TTS engine instance, lazily created on first IndexTTS call.
_tts_engine = None

async def synthesize_speech_segment(text: str, sessionid: str, any4dh_reals: Dict) -> Dict[str, Any]:
    """
    Synthesize speech for a single text segment and sync it to the digital human.

    Engine selection order: IndexTTS (when enabled), external EdgeTTS,
    then the digital human's internal TTS as the last resort.

    Args:
        text: Text segment to synthesize.
        sessionid: Session id (string or int; coerced to int when possible).
        any4dh_reals: Mapping of session id -> digital-human instance.

    Returns:
        Result dict with at least 'success' and 'text'; 'error' on failure.
    """
    # NOTE: the original declared `global _tts_engine` here but never
    # assigned it in this function — the dead declaration is removed.
    start_time = time.time()

    try:
        # Session ids may arrive as strings while the mapping may be keyed
        # by ints; coerce when possible, otherwise use the value as-is.
        try:
            sessionid_key = int(sessionid) if isinstance(sessionid, str) else sessionid
        except (ValueError, TypeError):
            sessionid_key = sessionid

        # Validate the session before doing any synthesis work.
        if not sessionid_key or sessionid_key not in any4dh_reals:
            logger.warning(f"TTS: sessionid not found - {sessionid_key} (original: {sessionid})")
            return {
                'success': False,
                'text': text,
                'error': 'Session not found'
            }

        from config import Config

        # Pre-filter special characters; best-effort if the filter is missing.
        try:
            from core.tts.filter import filter_special_chars
            text = filter_special_chars(text)
        except ImportError:
            pass

        # Preferred engine: IndexTTS.
        if Config.INDEX_TTS_MODEL_ENABLED:
            return await _synthesize_with_index_tts(text, sessionid_key, any4dh_reals, start_time)

        # Fallback 1: external EdgeTTS synthesis.
        elif Config.EDGE_TTS_ENABLED:
            return await _synthesize_with_external_edge_tts(text, sessionid_key, any4dh_reals, start_time)

        # Fallback 2: digital human's built-in TTS.
        else:
            return await _synthesize_with_internal_tts(text, sessionid_key, any4dh_reals, start_time)

    except Exception as e:
        logger.error(f"TTS processing failed: {str(e)}")
        return {
            'success': False,
            'text': text,
            'error': str(e)
        }

async def _synthesize_with_index_tts(text: str, sessionid_key: int, any4dh_reals: Dict, start_time: float) -> Dict[str, Any]:
    """Synthesize with IndexTTS; fall back to EdgeTTS / internal TTS on failure.

    Args:
        text: Text segment to synthesize.
        sessionid_key: Normalized key into any4dh_reals.
        any4dh_reals: Mapping of session id -> digital-human instance.
        start_time: Timestamp when the overall request started (for timing).

    Returns:
        Result dict from the audio sync, or from a fallback engine.
    """
    global _tts_engine

    try:
        from core.tts.index_tts_engine import IndexTTSEngine
        from config import Config
        from core.tts.temp_file_manager import create_temp_stream_file

        output_path = create_temp_stream_file()

        def tts_call():
            # Runs in a worker thread: creates the shared engine instance
            # on first use, then generates speech synchronously.
            global _tts_engine
            if _tts_engine is None:
                optimized_config = {
                    'model_path': Config.INDEX_TTS_MODEL_DIR,
                    'device': Config.INDEX_TTS_DEVICE,
                    'min_request_interval': 0.001,
                    'max_tokens': Config.INDEX_TTS_FAST_MAX_TOKENS,
                    'bucket_size': Config.INDEX_TTS_FAST_BATCH_SIZE,
                    'fast_mode': Config.INDEX_TTS_FAST_ENABLED,
                    'timeout': Config.INDEX_TTS_TIMEOUT
                }
                _tts_engine = IndexTTSEngine.get_instance(optimized_config)

            return _tts_engine.generate_speech(
                text=text,
                output_path=str(output_path),
                voice="default"
            )

        # Run the blocking IndexTTS call in the default thread pool.
        # fix: get_running_loop() replaces the deprecated get_event_loop()
        # pattern inside a coroutine.
        success = await asyncio.get_running_loop().run_in_executor(None, tts_call)
        generation_time = time.time() - start_time

        if success and os.path.exists(output_path):
            # Hand the generated audio file over to the digital human.
            return await _sync_audio_to_digital_human(output_path, text, sessionid_key, any4dh_reals, generation_time, 'index-tts')
        else:
            logger.warning(f"IndexTTS failed: {text[:20]}..., falling back to EdgeTTS")
            # IndexTTS produced nothing usable: fall back.
            if Config.EDGE_TTS_ENABLED:
                return await _synthesize_with_external_edge_tts(text, sessionid_key, any4dh_reals, start_time)
            else:
                return await _synthesize_with_internal_tts(text, sessionid_key, any4dh_reals, start_time)

    except Exception as e:
        logger.error(f"IndexTTS processing failed: {str(e)}, falling back to EdgeTTS")
        # NOTE(review): if the `config` import itself failed above, `Config`
        # is unbound here and this raises NameError — assumes config is
        # always importable; verify.
        if Config.EDGE_TTS_ENABLED:
            return await _synthesize_with_external_edge_tts(text, sessionid_key, any4dh_reals, start_time)
        else:
            return await _synthesize_with_internal_tts(text, sessionid_key, any4dh_reals, start_time)

def _remove_temp_file_silently(path: str) -> None:
    """Best-effort removal of a staged temp file; ignore filesystem errors."""
    try:
        os.remove(path)
    except OSError:
        pass

async def _synthesize_with_external_edge_tts(text: str, sessionid_key: int, any4dh_reals: Dict, start_time: float) -> Dict[str, Any]:
    """Synthesize with the external EdgeTTS service; fall back to internal TTS.

    Args:
        text: Text segment to synthesize.
        sessionid_key: Normalized key into any4dh_reals.
        any4dh_reals: Mapping of session id -> digital-human instance.
        start_time: Timestamp when the overall request started (for timing).

    Returns:
        Result dict from the audio sync, or from the internal-TTS fallback.
    """
    try:
        from config import Config
        from edge_tts import Communicate

        # EdgeTTS writes to a file, so stage the output in a temp file.
        # delete=False keeps the file alive past the `with` block.
        import tempfile
        with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as temp_file:
            output_path = temp_file.name

        default_voice = getattr(Config, 'EDGE_DEFAULT_VOICE', 'zh-CN-YunxiaNeural')

        try:
            communicate = Communicate(text, default_voice)
            await communicate.save(output_path)
            generation_time = time.time() - start_time

            if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
                # _sync_audio_to_digital_human removes the temp file itself.
                return await _sync_audio_to_digital_human(output_path, text, sessionid_key, any4dh_reals, generation_time, 'edge-tts')
            else:
                logger.error("External EdgeTTS: output file is empty or missing")
                # fix: the empty staged file was previously leaked here.
                _remove_temp_file_silently(output_path)
                return await _synthesize_with_internal_tts(text, sessionid_key, any4dh_reals, start_time)

        except Exception as e:
            logger.error(f"External EdgeTTS synthesis failed: {e}")
            # fix: the staged file was previously leaked on synthesis failure.
            _remove_temp_file_silently(output_path)
            return await _synthesize_with_internal_tts(text, sessionid_key, any4dh_reals, start_time)

    except Exception as e:
        logger.error(f"External EdgeTTS setup failed: {e}")
        return await _synthesize_with_internal_tts(text, sessionid_key, any4dh_reals, start_time)

async def _synthesize_with_internal_tts(text: str, sessionid_key: int, any4dh_reals: Dict, start_time: float) -> Dict[str, Any]:
    """使用数字人内部TTS系统"""
    try:
        digital_human = any4dh_reals[sessionid_key]

        # 直接调用数字人内部TTS系统
        digital_human.put_msg_txt(text)

        generation_time = time.time() - start_time
        logger.info(f"TTS: text sent to internal digital human TTS - '{text[:30]}...'")

        return {
            'success': True,
            'text': text,
            'generation_time': generation_time,
            'tts_engine': 'internal',
            'method': 'direct_text'
        }

    except Exception as e:
        logger.error(f"Internal TTS failed: {e}")
        return {
            'success': False,
            'text': text,
            'error': f'Internal TTS failed: {str(e)}'
        }

async def _sync_audio_to_digital_human(audio_path: str, text: str, sessionid_key: int, any4dh_reals: Dict, generation_time: float, tts_engine: str) -> Dict[str, Any]:
    """将音频文件同步到数字人"""
    try:
        with open(audio_path, 'rb') as f:
            audio_bytes = f.read()

        # 发送音频数据到数字人
        any4dh_reals[sessionid_key].put_audio_file(audio_bytes)

        logger.info(f"{tts_engine}: audio synced to digital human - {len(audio_bytes)} bytes")

        # 清理临时文件
        try:
            os.remove(audio_path)
        except Exception as e:
            logger.warning(f"Failed to cleanup {tts_engine} temp file: {e}")

        return {
            'success': True,
            'text': text,
            'generation_time': generation_time,
            'tts_engine': tts_engine,
            'method': 'external_audio',
            'audio_size': len(audio_bytes)
        }

    except Exception as e:
        logger.error(f"{tts_engine} audio sync failed: {e}")
        # 清理临时文件
        try:
            os.remove(audio_path)
        except:
            pass

        return {
            'success': False,
            'text': text,
            'error': f'{tts_engine} audio sync failed: {str(e)}',
            'tts_engine': tts_engine
        }

async def process_llm_stream(text: str) -> AsyncGenerator[str, None]:
    """
    Stream an LLM response for the given input text.

    Args:
        text: User input to forward to the LLM.

    Yields:
        Non-empty response text fragments; on any failure, a single
        fallback apology message.
    """
    try:
        from config import Config

        use_unified = getattr(Config, 'ANY4DH_USE_UNIFIED_INTERFACE', True)

        # Pick the LLM backend once, then drain it with a single loop.
        if use_unified:
            from core.chat.unified_interface import UnifiedLLMInterface

            stream = UnifiedLLMInterface.generate_response_stream(
                content=text,
                sender="any4dh_user",
                user_nick="数字人用户",
                platform="any4dh",
                generation_id=f"any4dh_{int(time.time())}"
            )
        else:
            from core.chat.llm import llm_service
            service = llm_service()
            stream = service.generate_stream(text)

        async for piece in stream:
            # Drop empty / whitespace-only fragments.
            if piece and piece.strip():
                yield piece

    except Exception as e:
        logger.error(f"LLM streaming processing failed: {str(e)}")
        yield "抱歉，我现在无法回答这个问题。"

class StreamingTTSProcessor:
    """Streaming TTS processor.

    Accumulates streamed LLM output, cuts it into complete sentences, and
    synthesizes each sentence (concurrency bounded by a semaphore) while
    yielding status/result dicts the caller can forward to the client.
    """

    def __init__(self, sessionid: str, any4dh_reals: Dict):
        self.sessionid = sessionid            # session id forwarded to TTS calls
        self.any4dh_reals = any4dh_reals      # session id -> digital-human instance
        self.accumulated_text = ""            # NOTE(review): unused; local var is used instead
        self.processed_segments = 0           # count of synthesized segments
        self.semaphore = asyncio.Semaphore(6) # cap concurrent TTS requests at 6

    async def process_streaming_response(self, user_input: str) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Stream the LLM response for user_input and synthesize it sentence by sentence.

        Args:
            user_input: The user's recognized (ASR) input text.

        Yields:
            Dicts keyed by 'type': 'status', 'audio_file', 'audio_segment',
            'final_segment', 'completed', or 'error'.
        """
        try:
            # Initial status so the client can show progress immediately.
            yield {
                'type': 'status',
                'message': '正在生成回复...',
                'recognized_text': user_input
            }

            accumulated_text = ""
            voice_response_found = False

            async for chunk in process_llm_stream(user_input):
                accumulated_text += chunk

                # A knowledge-base voice marker short-circuits normal TTS.
                if not voice_response_found and "[VOICE_KB_RESPONSE:" in accumulated_text:
                    voice_response_found = True
                    voice_info = self._parse_voice_response(accumulated_text)
                    if voice_info:
                        result = await self._process_voice_file(voice_info)

                        # fix: _process_voice_file returns {'error': ...} on
                        # failure; the old code raised KeyError on 'audio_path'
                        # and relied on the outer handler to report it.
                        if 'error' in result:
                            yield {
                                'type': 'error',
                                'message': f'处理失败: {result["error"]}'
                            }
                        else:
                            yield {
                                'type': 'audio_file',
                                'data': {
                                    'audio_path': result['audio_path'],
                                    'text': result['text'],
                                    'is_predefined': True
                                }
                            }
                        return  # predefined voice replaces the text pipeline

                # Normal path: synthesize each completed sentence.
                if not voice_response_found:
                    sentences = split_text_by_punctuation(accumulated_text)

                    if len(sentences) > 1:
                        # All but the last sentence are complete; speak them.
                        for sentence in sentences[:-1]:
                            try:
                                # Bound concurrent TTS work with the semaphore.
                                async with self.semaphore:
                                    result = await synthesize_speech_segment(
                                        sentence,
                                        self.sessionid,
                                        self.any4dh_reals
                                    )

                                yield {
                                    'type': 'audio_segment',
                                    'data': result,
                                    'partial_text': accumulated_text,
                                    'segment_index': self.processed_segments
                                }

                                self.processed_segments += 1

                                # Tiny pause to let the event loop breathe.
                                delay_time = 0.001
                                await asyncio.sleep(delay_time)

                            except Exception as e:
                                logger.error(f"Audio segment processing failed: {e}")
                                continue

                        # Keep only the trailing (possibly incomplete) sentence.
                        accumulated_text = sentences[-1] if sentences else accumulated_text

            # Flush whatever text remains after the stream ends.
            if not voice_response_found and accumulated_text.strip():
                try:
                    result = await synthesize_speech_segment(
                        accumulated_text,
                        self.sessionid,
                        self.any4dh_reals
                    )

                    yield {
                        'type': 'final_segment',
                        'data': result,
                        'complete_text': accumulated_text,
                        'segment_index': self.processed_segments,
                        'is_final': True
                    }

                except Exception as e:
                    logger.error(f"Final audio segment processing failed: {e}")

            # Completion status.
            yield {
                'type': 'completed',
                'message': '回复完成',
                'total_segments': self.processed_segments
            }

        except Exception as e:
            logger.error(f"Streaming processing failed: {str(e)}")
            yield {
                'type': 'error',
                'message': f'处理失败: {str(e)}'
            }

    def _parse_voice_response(self, response_text: str) -> Optional[Dict[str, Any]]:
        """Extract a [VOICE_KB_RESPONSE:filename:text] marker.

        Returns:
            {'audio_file': ..., 'text': ...} when the marker is present,
            otherwise None.  (fix: annotation now reflects the None return.)
        """
        try:
            # Format: [VOICE_KB_RESPONSE:filename:text_content]
            import re
            pattern = r'\[VOICE_KB_RESPONSE:([^:]+):(.+)\]'
            match = re.search(pattern, response_text)
            if match:
                return {
                    "audio_file": match.group(1),
                    "text": match.group(2)
                }
        except Exception as e:
            logger.error(f"Failed to parse voice response: {e}")
        return None

    async def _process_voice_file(self, voice_info: Dict[str, Any]) -> Dict[str, Any]:
        """Resolve a predefined voice file on disk and describe it for the client.

        Args:
            voice_info: Dict with 'audio_file' (filename) and 'text'.

        Returns:
            On success: dict with 'audio_path', 'text', 'is_predefined',
            'audio_file'.  On failure: {'error': <message>}.
        """
        try:
            from pathlib import Path

            def get_config():
                # Deferred import so a broken config degrades gracefully.
                try:
                    from config import Config
                    return Config
                except ImportError as e:
                    logger.error(f"Failed to import Config: {e}")
                    return None

            config = get_config()
            if not config:
                return {"error": "Config not available"}

            audio_file = voice_info["audio_file"]
            audio_dir = Path(config.VOICE_KB_AUDIO_DIR)
            audio_path = audio_dir / audio_file

            if not audio_path.exists():
                logger.error(f"Voice file not found: {audio_path}")
                return {"error": "Voice file not found"}

            # The any4dh frontend can play this file path directly.
            return {
                "audio_path": str(audio_path),
                "text": voice_info["text"],
                "is_predefined": True,
                "audio_file": audio_file
            }

        except Exception as e:
            logger.error(f"Failed to process voice file: {e}")
            return {"error": str(e)}