"""
MiniMax TTS语音合成模块
全球榜首的中文TTS，音质接近真人
官网: https://www.minimaxi.com
"""

import os
import time
import wave
import uuid
import requests
import base64
from typing import List, Optional, Tuple
from pathlib import Path

import numpy as np
from pydub import AudioSegment

from ..core.models import LipSyncData
from ..utils.logger import get_logger
from ..utils.temp_file_manager import get_temp_file_manager


class MiniMaxTTS:
    """MiniMax TTS speech-synthesis engine (globally top-ranked for Chinese)."""

    # Default synthesis model (Speech-02-HD); override via the ``model`` argument.
    DEFAULT_MODEL = "speech-02-240228"

    def __init__(
        self,
        api_key: str,
        group_id: str,
        voice_id: str = "male-qn-qingse",
        api_url: str = "https://api.minimax.chat/v1/t2a_v2",
        speed: float = 1.0,
        vol: float = 1.0,
        pitch: int = 0,
        emotion: str = "neutral",
        audio_sample_rate: int = 32000,
        bitrate: int = 128000,
        timeout: int = 30,
        model: str = DEFAULT_MODEL
    ):
        """
        Initialize the MiniMax TTS engine.

        Args:
            api_key: MiniMax API key.
            group_id: MiniMax Group ID (sent as the ``GroupId`` query parameter).
            voice_id: Voice ID (a preset voice or a custom voice).
            api_url: API endpoint URL.
            speed: Speaking rate (0.5-2.0).
            vol: Volume (0.1-10.0).
            pitch: Pitch shift in semitones (-12 to 12).
            emotion: Emotion type (neutral/happy/sad/angry/fearful, ...).
            audio_sample_rate: Audio sample rate (16000/24000/32000).
            bitrate: Audio bitrate.
            timeout: Request timeout in seconds.
            model: Synthesis model name (defaults to Speech-02-HD).
        """
        self.api_key = api_key
        self.group_id = group_id
        self.voice_id = voice_id
        self.api_url = api_url
        self.speed = speed
        self.vol = vol
        self.pitch = pitch
        self.emotion = emotion
        self.audio_sample_rate = audio_sample_rate
        self.bitrate = bitrate
        self.timeout = timeout
        self.model = model

        self.logger = get_logger()
        # Result of the most recent successful synthesis.
        self.current_audio: Optional[AudioSegment] = None
        self.current_lipsync_data: List[LipSyncData] = []

        # Shared temp-file manager keeps intermediate WAV files in one place.
        self.temp_manager = get_temp_file_manager()
        self.temp_dir = self.temp_manager.get_temp_dir('audio')

        self.logger.info("✓ MiniMax TTS引擎初始化完成")
        self.logger.info(f"   音色: {self.voice_id}")
        self.logger.info(f"   采样率: {self.audio_sample_rate}Hz")

    def synthesize(self, text: str, speaker: Optional[str] = None) -> Tuple[Optional[AudioSegment], List[LipSyncData]]:
        """
        Synthesize speech for the given text.

        Args:
            text: Text to synthesize.
            speaker: Optional voice ID overriding the default voice for this call.

        Returns:
            Tuple of (audio segment, lip-sync frame list); (None, []) on failure.
        """
        try:
            self.logger.debug(f"正在使用MiniMax合成语音: {text[:50]}...")

            # Per-call voice override falls back to the configured default.
            current_voice_id = speaker if speaker else self.voice_id

            request_data = {
                "model": self.model,
                "text": text,
                "stream": False,  # non-streaming: full audio in one response
                "voice_setting": {
                    "voice_id": current_voice_id,
                    "speed": self.speed,
                    "vol": self.vol,
                    "pitch": self.pitch,
                    "emotion": self.emotion
                },
                "audio_setting": {
                    "sample_rate": self.audio_sample_rate,
                    "bitrate": self.bitrate,
                    # WAV so the lip-sync extractor can read raw PCM frames.
                    "format": "wav"
                }
            }

            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }

            self.logger.debug("发送API请求...")
            start_time = time.time()

            response = requests.post(
                self.api_url,
                json=request_data,
                headers=headers,
                params={"GroupId": self.group_id},
                timeout=self.timeout
            )

            request_time = time.time() - start_time
            self.logger.debug(f"API响应时间: {request_time:.2f}秒")

            if response.status_code != 200:
                error_msg = f"MiniMax API请求失败: HTTP {response.status_code}"
                try:
                    error_detail = response.json()
                    error_msg += f" - {error_detail}"
                    self.logger.error(error_msg)
                    self.logger.error(f"完整错误: {error_detail}")
                except ValueError:  # body was not JSON; log raw text instead
                    error_msg += f" - {response.text[:200]}"
                    self.logger.error(error_msg)
                return None, []

            response_data = response.json()

            if "data" not in response_data or "audio" not in response_data["data"]:
                self.logger.error(f"MiniMax响应格式错误: {response_data}")
                return None, []

            # Audio payload arrives as an encoded string inside the JSON body.
            # NOTE(review): current t2a_v2 docs describe this field as
            # hex-encoded -- confirm base64 matches the deployed API version.
            audio_base64 = response_data["data"]["audio"]
            audio_bytes = base64.b64decode(audio_base64)

            # Stage the decoded WAV in the project temp dir and track it so
            # a global cleanup can reclaim it even on abnormal exit.
            temp_filename = f"minimax_tts_{uuid.uuid4().hex[:8]}.wav"
            temp_file_path = self.temp_dir / temp_filename
            self.temp_manager.track_file(temp_file_path)

            try:
                with open(temp_file_path, 'wb') as f:
                    f.write(audio_bytes)

                audio = AudioSegment.from_wav(str(temp_file_path))
                self.current_audio = audio

                # 60 fps to match the renderer's frame rate.
                lipsync_data = self._extract_lipsync_data(str(temp_file_path), fps=60)
                self.current_lipsync_data = lipsync_data
            finally:
                # Remove the temp WAV even if loading/extraction raised.
                try:
                    self.temp_manager.cleanup_file(temp_file_path)
                except Exception as e:
                    self.logger.warning(f"清理临时文件失败: {e}")

            self.logger.info(
                f"✓ MiniMax语音合成成功, "
                f"时长: {len(audio)/1000:.2f}秒, "
                f"口型帧数: {len(lipsync_data)}"
            )

            return audio, lipsync_data

        except requests.exceptions.Timeout:
            self.logger.error(f"MiniMax API请求超时 (>{self.timeout}秒)")
            return None, []
        except requests.exceptions.RequestException as e:
            self.logger.error(f"MiniMax API请求异常: {e}")
            return None, []
        except Exception as e:
            self.logger.error(f"MiniMax语音合成异常: {e}")
            import traceback
            self.logger.error(traceback.format_exc())
            return None, []

    def _extract_lipsync_data(self, audio_file: str, fps: int = 60) -> List[LipSyncData]:
        """
        Extract lip-sync frames from a WAV file via per-frame RMS loudness.

        Args:
            audio_file: Path to the WAV file.
            fps: Frame rate (default 60 fps, matching the render frame rate).

        Returns:
            List of LipSyncData frames; empty list on failure.
        """
        try:
            with wave.open(audio_file, 'rb') as wav_file:
                sample_rate = wav_file.getframerate()
                n_frames = wav_file.getnframes()
                audio_data = wav_file.readframes(n_frames)

                # NOTE(review): assumes 16-bit mono PCM. For stereo or other
                # sample widths both the dtype and the timestamps would be
                # wrong -- confirm against the API's actual WAV output.
                audio_array = np.frombuffer(audio_data, dtype=np.int16)

                # Number of PCM samples covered by one render frame.
                frame_interval = 1.0 / fps
                samples_per_frame = int(sample_rate * frame_interval)

                lipsync_data = []

                for i in range(0, len(audio_array), samples_per_frame):
                    frame_data = audio_array[i:i + samples_per_frame]

                    if len(frame_data) == 0:
                        break

                    # RMS loudness normalized by 16-bit full scale to [0, 1].
                    rms = np.sqrt(np.mean(frame_data.astype(np.float32) ** 2))
                    max_amplitude = 32768.0
                    volume = min(rms / max_amplitude, 1.0)

                    # Map loudness to mouth openness with a power curve that
                    # boosts visibility at low volumes; gate out the noise floor.
                    if volume > 0.01:
                        mouth_open = min(np.power(volume * 3.0, 0.6), 1.0)
                    else:
                        mouth_open = 0.0

                    timestamp = i / sample_rate
                    lipsync_data.append(
                        LipSyncData(
                            timestamp=timestamp,
                            volume=volume,
                            mouth_open=mouth_open
                        )
                    )

                self.logger.debug(f"提取了 {len(lipsync_data)} 帧口型数据 (fps={fps})")
                return lipsync_data

        except Exception as e:
            self.logger.error(f"提取口型数据失败: {e}")
            return []

    def set_voice(self, voice_id: str):
        """
        Switch the default voice.

        Args:
            voice_id: Voice ID.
        """
        self.voice_id = voice_id
        self.logger.info(f"已切换音色: {voice_id}")

    def set_emotion(self, emotion: str):
        """
        Set the emotion.

        Args:
            emotion: Emotion type (neutral/happy/sad/angry/fearful, ...).
        """
        self.emotion = emotion
        self.logger.debug(f"已设置情感: {emotion}")

    def set_speed(self, speed: float):
        """
        Set the speaking rate, clamped to the API's supported range.

        Args:
            speed: Speaking rate (0.5-2.0).
        """
        self.speed = max(0.5, min(2.0, speed))
        self.logger.debug(f"已设置语速: {self.speed}")

    def test_connection(self) -> bool:
        """
        Test API connectivity by synthesizing a short phrase.

        Returns:
            True when the round trip succeeds.
        """
        try:
            self.logger.info("正在测试MiniMax API连接...")

            # Short text keeps the probe cheap and fast.
            audio, lipsync_data = self.synthesize("你好")

            if audio is not None:
                self.logger.info("✓ MiniMax API连接测试成功!")
                self.logger.info(f"   测试音频时长: {len(audio)/1000:.2f}秒")
                return True
            else:
                self.logger.error("✗ MiniMax API连接测试失败")
                return False

        except Exception as e:
            self.logger.error(f"MiniMax API连接测试异常: {e}")
            return False

    def get_audio_duration(self, audio: Optional[AudioSegment] = None) -> float:
        """
        Return the duration of an audio segment in seconds.

        Args:
            audio: Audio segment; defaults to the most recent synthesis result.

        Returns:
            Duration in seconds (0.0 when no audio is available).
        """
        if audio is None:
            audio = self.current_audio

        if audio is None:
            return 0.0

        # AudioSegment length is in milliseconds.
        return len(audio) / 1000.0

    def cleanup_temp_files(self):
        """Clean up all tracked temporary files."""
        try:
            self.temp_manager.cleanup_all()
            self.logger.info("✓ MiniMax TTS临时文件已清理")
        except Exception as e:
            self.logger.warning(f"清理临时文件失败: {e}")


# MiniMax preset voice IDs mapped to their (Chinese) display names.
MINIMAX_VOICES = {
    # Male voices
    "male-qn-qingse": "青涩青年音色",
    "male-qn-jingying": "精英青年音色",
    "male-qn-badao": "霸道青年音色",
    "male-qn-daxuesheng": "青年大学生音色",

    # Female voices
    "female-shaonv": "少女音色",
    "female-yujie": "御姐音色",
    "female-chengshu": "成熟女性音色",
    "female-tianmei": "甜美女性音色",

    # Child voices
    "male-child": "男童音色",
    "female-child": "女童音色",

    # Elderly voices
    "male-laonian": "老年男性音色",
    "female-laonian": "老年女性音色",

    # Specialty voices
    "presenter_male": "男性主播音色",
    "presenter_female": "女性主播音色",
    "audiobook_male_1": "男性有声书音色1",
    "audiobook_female_1": "女性有声书音色1",
}

# Emotion types accepted by the voice_setting.emotion field.
MINIMAX_EMOTIONS = [
    "neutral",    # neutral
    "happy",      # happy
    "sad",        # sad
    "angry",      # angry
    "fearful",    # fearful
    "disgusted",  # disgusted
    "surprised",  # surprised
]
