#!/usr/bin/env python3
"""
文字转语音(TTS)服务
集成FFmpeg音频处理，支持多种音频格式和实时语音生成
"""

import os
import asyncio
import hashlib
import logging
import re
import subprocess
import tempfile
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, Optional

import aiofiles
import edge_tts
from pydantic import BaseModel

from ..core.config import settings, NovelProject

logger = logging.getLogger(__name__)


class VoiceConfig(BaseModel):
    """Voice configuration model.

    One edge-tts voice preset: an Azure neural voice name plus prosody
    adjustments in edge-tts string form (e.g. rate "+10%", pitch "-50Hz").
    """
    # Azure neural voice identifier, e.g. "zh-CN-YunjianNeural".
    voice_name: str
    # "male" or "female" (informational only; not validated here).
    gender: str
    language: str = "zh-CN"
    rate: str = "+0%"
    volume: str = "+0%"
    pitch: str = "+0Hz"


class TTSService:
    """Text-to-speech service backed by edge-tts with optional FFmpeg effects."""

    def __init__(self):
        """Create the audio directory, probe FFmpeg, and load voice presets."""
        self.audio_dir = Path(settings.STATIC_DIR, "audio")
        self.audio_dir.mkdir(parents=True, exist_ok=True)

        # Probe once at startup; audio effects are skipped when unavailable.
        self.ffmpeg_available = self._check_ffmpeg()
        if not self.ffmpeg_available:
            logger.warning("FFmpeg未安装或不可用，将使用基础音频处理")

        # Preset voices keyed by gender, then by style name.
        self.voice_configs = self._load_voice_configs()

        # Cache bookkeeping: audio file path -> expiry timestamp.
        # NOTE(review): audio_cache itself is never read in this module.
        self.audio_cache = {}
        self.cache_expiry = {}
        
    def _check_ffmpeg(self) -> bool:
        """Return True when an `ffmpeg` binary responds to `-version`."""
        try:
            probe = subprocess.run(
                ['ffmpeg', '-version'],
                capture_output=True,
                text=True,
                timeout=5,
            )
        except (subprocess.SubprocessError, FileNotFoundError):
            # Binary missing, timed out, or otherwise unusable.
            return False
        return probe.returncode == 0
    
    def _load_voice_configs(self) -> Dict[str, Dict[str, VoiceConfig]]:
        """Build the preset voice table, keyed by gender and then style.

        Every gender has a "zh-CN" default style plus named variants
        ("young"/"mature" for male, "sweet"/"elegant" for female).
        """
        # (voice_name, rate, volume, pitch) per style; all presets are zh-CN.
        presets = {
            "male": {
                "zh-CN": ("zh-CN-YunjianNeural", "+0%", "+0%", "+0Hz"),
                "young": ("zh-CN-YunxiNeural", "+10%", "+0%", "+50Hz"),
                "mature": ("zh-CN-YunyangNeural", "-10%", "+0%", "-50Hz"),
            },
            "female": {
                "zh-CN": ("zh-CN-XiaoxiaoNeural", "+0%", "+0%", "+0Hz"),
                "sweet": ("zh-CN-XiaoyiNeural", "+5%", "+0%", "+100Hz"),
                "elegant": ("zh-CN-XiaochenNeural", "-5%", "+0%", "-30Hz"),
            },
        }
        return {
            gender: {
                style: VoiceConfig(
                    voice_name=name,
                    gender=gender,
                    language="zh-CN",
                    rate=rate,
                    volume=volume,
                    pitch=pitch,
                )
                for style, (name, rate, volume, pitch) in styles.items()
            }
            for gender, styles in presets.items()
        }
    
    def _get_voice_config(
        self, 
        character_name: str, 
        gender: str = "male",
        role_type: str = "supporting"
    ) -> VoiceConfig:
        """Pick a voice preset for a character.

        Selection is driven by `gender` and `role_type` (`character_name`
        is currently unused). Unknown genders fall back to the male table;
        unknown role types fall back to the gender's "zh-CN" default.
        """
        gender_configs = self.voice_configs.get(gender, self.voice_configs["male"])
        default = gender_configs["zh-CN"]

        # role_type -> preferred style key, per gender branch.
        if gender == "male":
            style_by_role = {
                "protagonist": "mature", "hero": "mature",
                "young": "young", "child": "young",
            }
        else:  # any non-male gender uses the female role mapping
            style_by_role = {
                "heroine": "elegant", "elegant": "elegant",
                "cute": "sweet", "young": "sweet",
            }

        style = style_by_role.get(role_type)
        if style is None:
            return default
        return gender_configs.get(style, default)
    
    def _infer_gender_from_name(self, character_name: str) -> str:
        """Heuristically guess "female" or "male" from characters in a name.

        Female indicators are checked first; a name matching neither set
        defaults to "male".
        """
        indicator_sets = (
            ("female", ("小", "美", "雪", "雨", "花", "月", "星",
                        "娜", "莉", "婷", "雯", "琴", "萱")),
            ("male", ("强", "刚", "军", "明", "华", "伟", "峰",
                      "龙", "虎", "杰", "斌", "勇", "辉")),
        )
        for gender, indicators in indicator_sets:
            if any(ch in character_name for ch in indicators):
                return gender
        return "male"
    
    def _generate_audio_filename(
        self, text: str, voice_config: VoiceConfig
    ) -> str:
        """Derive a deterministic cache filename for (text, voice settings).

        Fix: the hash input now also covers `volume`; previously two
        requests that differed only in volume hashed to the same filename
        and could serve each other's cached audio.
        """
        hash_input = (
            f"{text}_{voice_config.voice_name}_"
            f"{voice_config.rate}_{voice_config.volume}_{voice_config.pitch}"
        )
        # MD5 is used only as a cache key, not for anything security-sensitive.
        text_hash = hashlib.md5(hash_input.encode()).hexdigest()
        return f"tts_{text_hash}.mp3"
    
    async def _apply_ffmpeg_effects(
        self, 
        input_path: str, 
        output_path: str,
        effects: Optional[Dict[str, Any]] = None
    ) -> bool:
        """Re-encode `input_path` to `output_path`, applying audio effects.

        Supported `effects` keys:
            volume (float)  -- gain multiplier, 1.0 = unchanged
            pitch (float)   -- asetrate trick; NOTE this also changes tempo
            speed (float)   -- atempo factor
            echo, telephone, highpass, compress (bool) -- fixed filter toggles

        Returns True on success; False when FFmpeg is unavailable, `effects`
        is empty/None, or the FFmpeg process fails.

        Fix: the `effects` annotation previously used the builtin `any`
        instead of `typing.Any` and omitted `Optional` for its None default.
        """
        if not self.ffmpeg_available or not effects:
            return False
        
        try:
            # -y: overwrite the output file without prompting.
            cmd = ['ffmpeg', '-i', input_path, '-y']
            
            filters = []
            
            # Volume gain.
            if effects.get('volume', 1.0) != 1.0:
                filters.append(f"volume={effects['volume']}")
            
            # Pitch shift: raise/lower the sample rate, then resample back
            # to 44.1 kHz.
            if effects.get('pitch', 1.0) != 1.0:
                pitch_factor = effects['pitch']
                filters.append(f"asetrate=44100*{pitch_factor},aresample=44100")
            
            # Playback speed.
            if effects.get('speed', 1.0) != 1.0:
                filters.append(f"atempo={effects['speed']}")
            
            # Echo.
            if effects.get('echo', False):
                filters.append("aecho=0.8:0.9:1000:0.3")
            
            # Low-pass filter: telephone-like bandwidth.
            if effects.get('telephone', False):
                filters.append("lowpass=f=3000")
            
            # High-pass filter: cut low-frequency rumble.
            if effects.get('highpass', False):
                filters.append("highpass=f=200")
            
            # Dynamic-range compression to even out loudness.
            if effects.get('compress', False):
                filters.append("acompressor=threshold=-20dB:ratio=4:attack=5:release=50")
            
            if filters:
                cmd.extend(['-af', ','.join(filters)])
            
            # Output: 128 kbps stereo MP3 at 44.1 kHz.
            cmd.extend([
                '-acodec', 'libmp3lame',
                '-b:a', '128k',
                '-ar', '44100',
                '-ac', '2',
                output_path
            ])
            
            # Run FFmpeg without blocking the event loop.
            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            
            stdout, stderr = await process.communicate()
            
            if process.returncode == 0:
                logger.info(f"FFmpeg处理成功: {output_path}")
                return True
            else:
                logger.error(f"FFmpeg处理失败: {stderr.decode()}")
                return False
                
        except Exception as e:
            logger.error(f"FFmpeg处理异常: {e}")
            return False
    
    async def _generate_speech(
        self, 
        text: str, 
        voice_config: VoiceConfig
    ) -> Optional[bytes]:
        """Synthesize `text` into MP3 bytes with edge-tts.

        Fix: the previous code wrapped the text in an SSML document and
        passed it as the input string. edge-tts does not accept raw SSML --
        the markup is escaped and read aloud verbatim, and the <prosody>
        settings are ignored. Prosody is therefore supplied through the
        `Communicate` keyword arguments instead.

        Returns the raw audio bytes, or None on failure.
        """
        try:
            communicate = edge_tts.Communicate(
                text,
                voice_config.voice_name,
                rate=voice_config.rate,
                volume=voice_config.volume,
                pitch=voice_config.pitch,
            )
            
            # Collect only the audio chunks; the stream also yields
            # word-boundary metadata events.
            audio_data = b""
            async for chunk in communicate.stream():
                if chunk["type"] == "audio":
                    audio_data += chunk["data"]
            
            return audio_data
            
        except Exception as e:
            logger.error(f"语音生成失败: {e}")
            return None
    
    # Technical vocabulary that must never reach the TTS engine. Compiled
    # once at class-definition time (previously rebuilt on every call) and
    # applied case-insensitively, whole words only.
    _TECH_WORD_RES = [
        re.compile(p, re.IGNORECASE) for p in (
            # Network / protocol terms
            r'\bspeak\b', r'\bhttp\b', r'\bhttps\b', r'\bapi\b', r'\burl\b',
            r'\bftp\b', r'\bwww\b', r'\bcom\b', r'\bnet\b', r'\borg\b',
            # Programming languages and tooling
            r'\bjson\b', r'\bxml\b', r'\bhtml\b', r'\bcss\b', r'\bjs\b',
            r'\bjavascript\b', r'\bpython\b', r'\bnode\b', r'\bnpm\b',
            r'\bgit\b', r'\bdocker\b', r'\bkubernetes\b', r'\baws\b',
            # Systems and services
            r'\bserver\b', r'\bclient\b', r'\bdatabase\b', r'\bsql\b',
            r'\brest\b', r'\bgraphql\b', r'\bwebsocket\b', r'\btcp\b',
            r'\budp\b', r'\bip\b', r'\bdns\b', r'\bssl\b', r'\btls\b',
            # Development concepts
            r'\bcli\b', r'\bgui\b', r'\bui\b', r'\bux\b', r'\bframework\b',
            r'\blibrary\b', r'\bpackage\b', r'\bmodule\b', r'\bclass\b',
            r'\bfunction\b', r'\bmethod\b', r'\bvariable\b', r'\bobject\b',
            # Data types and values
            r'\barray\b', r'\bstring\b', r'\bnumber\b', r'\bboolean\b',
            r'\bnull\b', r'\bundefined\b', r'\btrue\b', r'\bfalse\b',
            # Control flow and syntax
            r'\berror\b', r'\bexception\b', r'\btry\b', r'\bcatch\b',
            r'\bif\b', r'\belse\b', r'\bfor\b', r'\bwhile\b', r'\breturn\b',
            r'\bimport\b', r'\bexport\b', r'\brequire\b', r'\bconst\b',
            r'\blet\b', r'\bvar\b',
            # Misc technical vocabulary
            r'\bconfig\b', r'\bsetup\b', r'\binstall\b', r'\bupdate\b',
            r'\bversion\b', r'\bbuild\b', r'\bcompile\b', r'\bdeploy\b',
            r'\btest\b', r'\bdebug\b', r'\blog\b', r'\bcache\b',
            r'\btoken\b', r'\bauth\b', r'\blogin\b', r'\blogout\b',
            r'\bsession\b', r'\bcookie\b', r'\bheader\b', r'\bbody\b',
            r'\brequest\b', r'\bresponse\b', r'\bget\b', r'\bpost\b',
            r'\bput\b', r'\bdelete\b', r'\bpatch\b', r'\boptions\b',
        )
    ]

    def _clean_text_for_tts(self, text: str) -> str:
        """Reduce raw chat/LLM output to a short, speakable dialogue string.

        Pipeline (order matters):
          1. strip timestamps, speaker prefixes, bracketed tags, HTML, URLs
          2. blank out technical vocabulary
          3. strip markdown and code punctuation, collapse whitespace
          4. for long text (>200 chars), keep the first two plausible
             dialogue sentences
          5. hard-cap at ~150 chars, cutting at a punctuation boundary

        Returns "" when nothing speakable remains.

        Fixes: the empty/None guard now runs before any len(text) use
        (previously a None input raised TypeError in the logging line);
        logging uses lazy %-style args instead of f-strings.
        """
        # Empty / None input: nothing to do (checked before any len() use).
        if not text or not text.strip():
            logger.warning("输入文本为空")
            return ""

        logger.info("=== TTS文本处理开始 ===")
        logger.info("原始文本: %r", text)
        logger.info("原始文本长度: %d", len(text))
        logger.info("原始文本前100字符: %s", text[:100])

        original_text = text  # kept for the before/after log below

        text = self._strip_speaker_and_markup(text)
        text = self._strip_tech_terms(text)
        text = self._strip_markdown_and_symbols(text)
        text = self._pick_dialogue_sentences(text)
        text = self._truncate_to_sentence(text)

        logger.info("清理步骤:")
        logger.info("  原始: %s...", original_text[:50])
        logger.info("  清理后: %s...", text[:50])
        logger.info("  清理后长度: %d", len(text))
        logger.info("=== TTS文本处理结束 ===")

        return text

    def _strip_speaker_and_markup(self, text: str) -> str:
        """Remove timestamps, speaker prefixes, bracketed tags, HTML and URLs."""
        # [YYYY-MM-DD HH:MM:SS] timestamps
        text = re.sub(r'\[\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}\]\s*', '', text)
        # Leading "Name:" / "Name：" speaker prefix
        text = re.sub(r'^[^:：]*[:：]\s*', '', text)
        # Leading [tag] / 【tag】 system markers
        text = re.sub(r'^[\[【][^】\]]*[\]】]\s*', '', text)
        # HTML tags
        text = re.sub(r'<[^>]*>', '', text)
        # URLs (http/https, ftp, bare www.)
        text = re.sub(r'https?://[^\s]+', '', text)
        text = re.sub(r'ftp://[^\s]+', '', text)
        text = re.sub(r'www\.[^\s]+', '', text)
        return text

    def _strip_tech_terms(self, text: str) -> str:
        """Blank out technical vocabulary the TTS voice should not read."""
        for pattern in self._TECH_WORD_RES:
            text = pattern.sub('', text)
        return text

    def _strip_markdown_and_symbols(self, text: str) -> str:
        """Drop markdown markers and code-ish punctuation, collapse whitespace."""
        text = re.sub(r'\*{1,2}([^*]+)\*{1,2}', r'\1', text)  # bold / italic
        text = re.sub(r'`([^`]+)`', r'\1', text)              # inline code
        text = re.sub(r'```[^`]*```', '', text)               # fenced code blocks
        text = re.sub(r'[{}()\[\]<>]', ' ', text)             # brackets
        text = re.sub(r'[&|!@#$%^*+=~`]', ' ', text)          # special symbols
        text = re.sub(r'[_\-]{2,}', ' ', text)                # ___ / --- runs
        text = re.sub(r'\s+', ' ', text).strip()              # collapse whitespace
        return text

    def _pick_dialogue_sentences(self, text: str) -> str:
        """For long text (>200 chars), keep the first two plausible sentences.

        Sentences that are very short, look like system/debug output, or
        start with a speaker label are discarded.
        """
        if len(text) <= 200:
            return text
        sentences = re.split(r'[。！？.!?]\s*', text)
        kept = []
        for sentence in sentences:
            sentence = sentence.strip()
            if (len(sentence) > 5 and
                    not re.search(r'(系统|提示|错误|warning|error|debug)', sentence, re.I) and
                    not re.search(r'^(用户|User|人物|角色)[:：]', sentence)):
                kept.append(sentence)
        if kept:
            text = '。'.join(kept[:2])
            if not text.endswith(('。', '！', '？')):
                text += '。'
        return text

    def _truncate_to_sentence(self, text: str) -> str:
        """Hard-cap at ~150 chars, preferring a punctuation cut after char 100."""
        if len(text) <= 150:
            return text
        for end_char in ('。', '！', '？', '；', '，'):
            pos = text.find(end_char, 100)
            if pos > 0:
                return text[:pos + 1]
        # No punctuation found: cut outright and close the sentence.
        return text[:150] + '。'

    async def text_to_speech(
        self,
        text: str,
        character_name: str,
        novel_project: NovelProject,
        role_type: str = "supporting", 
        gender: str = "male",
        effects: Optional[Dict[str, Any]] = None
    ) -> Optional[str]:
        """Convert `text` to speech for a character and return the audio URL.

        The text is first cleaned down to speakable dialogue, a voice preset
        is chosen from `gender`/`role_type`, and the result is cached on disk
        keyed by (text, voice settings). Optional FFmpeg `effects` are applied
        when FFmpeg is available.

        Returns the public URL of the MP3, or None when the cleaned text is
        empty or generation fails.

        Fixes: the cache-hit and success paths previously embedded the
        literal placeholder "(unknown)" in the log message and returned URL
        instead of the generated filename, so every returned URL was broken;
        the `effects` annotation used builtin `any` instead of `typing.Any`;
        the FFmpeg temp-file handling is extracted into a helper with a
        `finally` so the temp file is always removed.
        """
        try:
            # Keep only the speakable dialogue portion of the input.
            cleaned_text = self._clean_text_for_tts(text)
            if not cleaned_text:
                logger.warning("清理后的文本为空，跳过TTS生成")
                return None
            
            voice_config = self._get_voice_config(character_name, gender, role_type)
            
            # Deterministic filename: same text + voice settings hit the cache.
            filename = self._generate_audio_filename(cleaned_text, voice_config)
            audio_path = self.audio_dir / filename
            
            if audio_path.exists() and self._is_cache_valid(str(audio_path)):
                logger.info(f"使用缓存音频: {filename}")
                return f"{settings.server_url}/static/audio/{filename}"
            
            audio_data = await self._generate_speech(cleaned_text, voice_config)
            if not audio_data:
                return None
            
            if self.ffmpeg_available and effects:
                # Post-process through FFmpeg (falls back to raw audio on error).
                await self._write_with_effects(audio_data, audio_path, effects)
            else:
                async with aiofiles.open(audio_path, 'wb') as f:
                    await f.write(audio_data)
            
            self._update_cache_record(str(audio_path))
            
            logger.info(f"语音生成成功: {filename}")
            return f"{settings.server_url}/static/audio/{filename}"
            
        except Exception as e:
            logger.error(f"TTS处理失败: {e}")
            return None

    async def _write_with_effects(
        self,
        audio_data: bytes,
        audio_path: Path,
        effects: Dict[str, Any]
    ) -> None:
        """Apply FFmpeg effects via a temp input file; fall back to raw audio.

        The temporary file is always removed, even when FFmpeg or the
        fallback write fails.
        """
        # NOTE(review): suffix is '.wav' but edge-tts emits MP3 data; FFmpeg
        # sniffs the real format, so this only mislabels the temp file.
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            temp_path = temp_file.name
            temp_file.write(audio_data)
        try:
            try:
                processed = await self._apply_ffmpeg_effects(
                    temp_path, str(audio_path), effects
                )
            except Exception as e:
                logger.error(f"音频后处理失败: {e}")
                processed = False
            if not processed:
                # FFmpeg unavailable/failed: keep the unprocessed audio.
                async with aiofiles.open(audio_path, 'wb') as f:
                    await f.write(audio_data)
        finally:
            if os.path.exists(temp_path):
                os.unlink(temp_path)
    
    def _is_cache_valid(self, audio_path: str) -> bool:
        """Return True when `audio_path` has a recorded, still-future expiry."""
        expiry = self.cache_expiry.get(audio_path)
        return expiry is not None and datetime.now() < expiry
    
    def _update_cache_record(self, audio_path: str):
        """Record `audio_path` as freshly cached, expiring 24 hours from now."""
        expires_at = datetime.now() + timedelta(hours=24)
        self.cache_expiry[audio_path] = expires_at
    
    def get_character_voice_info(self, character_name: str) -> Dict:
        """Describe the voice that would be used for `character_name`.

        Gender is inferred heuristically from the name; the returned dict
        echoes the selected preset's prosody settings.
        """
        inferred_gender = self._infer_gender_from_name(character_name)
        config = self._get_voice_config(character_name, inferred_gender)

        return {
            "character_name": character_name,
            "voice_name": config.voice_name,
            "gender": inferred_gender,
            "language": config.language,
            "rate": config.rate,
            "volume": config.volume,
            "pitch": config.pitch,
        }
    
    def get_available_voices(self) -> Dict:
        """List voice style keys per gender plus FFmpeg effect availability."""
        supported_effects = []
        if self.ffmpeg_available:
            supported_effects = [
                "volume", "pitch", "speed", "echo",
                "telephone", "highpass", "compress"
            ]
        return {
            "male_voices": list(self.voice_configs["male"]),
            "female_voices": list(self.voice_configs["female"]),
            "ffmpeg_available": self.ffmpeg_available,
            "supported_effects": supported_effects,
        }
    
    async def cleanup_old_cache(self):
        """Delete audio files whose cache entries have expired.

        Failures on individual files are logged and skipped; the sweep
        itself never raises.
        """
        try:
            now = datetime.now()
            # Collect first, then delete -- avoids mutating the dict while
            # iterating it.
            expired = [
                path for path, expires_at in self.cache_expiry.items()
                if now > expires_at
            ]

            for path in expired:
                try:
                    if os.path.exists(path):
                        os.remove(path)
                    del self.cache_expiry[path]
                    logger.info(f"清理过期缓存: {path}")
                except Exception as e:
                    logger.error(f"清理缓存失败 {path}: {e}")

        except Exception as e:
            logger.error(f"缓存清理异常: {e}")

# Global TTS service singleton, created at import time.
# NOTE(review): instantiation probes FFmpeg via subprocess and creates the
# audio directory as import side effects.
tts_service = TTSService()