#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Python 3.10 + PaddleSpeech 高级文本转语音程序
实现高质量中文语音合成，支持多说话人、情感控制等功能
"""

import os
import time
import logging
from pathlib import Path
from typing import List, Dict, Optional, Union
import numpy as np

# 音频处理相关
import librosa
import soundfile as sf
from pydub import AudioSegment

# PaddleSpeech 相关
from paddlespeech.cli.tts.infer import TTSExecutor
from paddlespeech.cli.tts import TTSExecutor as TTSExec
import paddle


class AdvancedPaddleTTS:
    """Advanced PaddleSpeech text-to-speech engine.

    Wraps ``TTSExecutor`` with speaker selection, preset "emotions"
    (implemented as speed/pitch post-processing on the rendered audio),
    batch synthesis, multi-speaker dialogue assembly, and whole-file
    conversion.
    """

    def __init__(self, device: str = "cpu", enable_onnx: bool = False):
        """
        Initialize the TTS engine.

        Args:
            device: Target device, "cpu" or "gpu". Anything other than
                "gpu" on a CUDA-enabled build falls back to CPU.
                (The old default "mps" was never honored and silently
                ran on CPU, so "cpu" preserves behavior.)
            enable_onnx: Whether to enable ONNX acceleration.
                NOTE(review): stored but not wired into inference yet.
        """
        self.device = device
        self.enable_onnx = enable_onnx
        self.tts_executor = None   # created in _initialize_tts()
        self.current_model = None  # model-config dict, see _initialize_tts()
        self.current_speaker = None

        # Configure logging for this module.
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(__name__)

        # Build the executor and select the default model.
        self._initialize_tts()

        # Register the predefined speaker and emotion presets.
        self._setup_speakers_and_emotions()

    def _initialize_tts(self):
        """Create the ``TTSExecutor`` and pick the default model config.

        Raises:
            Exception: re-raises any initialization failure after logging.
        """
        try:
            self.logger.info("正在初始化 PaddleSpeech TTS 引擎...")

            # Select the compute device; use the GPU only when it was
            # requested AND the installed paddle build supports CUDA.
            if self.device == "gpu" and paddle.device.is_compiled_with_cuda():
                paddle.device.set_device('gpu:0')
                self.logger.info("使用 GPU 加速")
            else:
                paddle.device.set_device('cpu')
                self.logger.info("使用 CPU 运行")

            self.tts_executor = TTSExecutor()

            # Default to the FastSpeech2 acoustic model + HiFiGAN vocoder
            # trained on the CSMSC Mandarin corpus. ``None`` entries mean
            # "use the bundled pretrained files".
            self.current_model = {
                'am': 'fastspeech2_csmsc',
                'am_config': None,
                'am_ckpt': None,
                'am_stat': None,
                'phones_dict': None,
                'tones_dict': None,
                'speaker_dict': None,
                'voc': 'hifigan_csmsc',
                'voc_config': None,
                'voc_ckpt': None,
                'voc_stat': None,
                'lang': 'zh',
                'device': self.device
            }

            self.logger.info("TTS 引擎初始化完成!")

        except Exception as e:
            self.logger.error(f"TTS 引擎初始化失败: {e}")
            raise

    def _setup_speakers_and_emotions(self):
        """Register the speaker and emotion presets and pick defaults."""
        # Available speakers (actual voices require matching model support).
        self.speakers = {
            'default': {'name': '默认女声', 'id': 0, 'gender': 'female'},
            'male_1': {'name': '男声1', 'id': 1, 'gender': 'male'},
            'female_1': {'name': '女声1', 'id': 0, 'gender': 'female'},
            'child': {'name': '童声', 'id': 2, 'gender': 'child'}
        }

        # Emotion presets, realized as speed/pitch/energy multipliers
        # applied in post-processing (energy is currently unused).
        self.emotions = {
            'neutral': {'speed': 1.0, 'pitch': 1.0, 'energy': 1.0, 'name': '中性'},
            'happy': {'speed': 1.2, 'pitch': 1.1, 'energy': 1.2, 'name': '开心'},
            'sad': {'speed': 0.8, 'pitch': 0.9, 'energy': 0.8, 'name': '悲伤'},
            'angry': {'speed': 1.3, 'pitch': 1.2, 'energy': 1.4, 'name': '愤怒'},
            'gentle': {'speed': 0.9, 'pitch': 0.95, 'energy': 0.9, 'name': '温柔'},
            'excited': {'speed': 1.4, 'pitch': 1.15, 'energy': 1.3, 'name': '兴奋'}
        }

        self.current_speaker = 'default'
        self.current_emotion = 'neutral'

    def get_available_speakers(self) -> Dict:
        """Return the mapping of speaker keys to speaker metadata."""
        return self.speakers

    def get_available_emotions(self) -> Dict:
        """Return the mapping of emotion keys to emotion presets."""
        return self.emotions

    def set_speaker(self, speaker_id: str) -> bool:
        """Select *speaker_id* as the current speaker.

        Returns:
            True if the key exists, False otherwise.
        """
        if speaker_id in self.speakers:
            self.current_speaker = speaker_id
            self.logger.info(f"已切换到说话人: {self.speakers[speaker_id]['name']}")
            return True
        else:
            self.logger.error(f"未找到说话人: {speaker_id}")
            return False

    def set_emotion(self, emotion: str) -> bool:
        """Select *emotion* as the current emotion preset.

        Returns:
            True if the key exists, False otherwise.
        """
        if emotion in self.emotions:
            self.current_emotion = emotion
            self.logger.info(f"已设置情感: {self.emotions[emotion]['name']}")
            return True
        else:
            self.logger.error(f"未找到情感: {emotion}")
            return False

    def synthesize(self,
                   text: str,
                   output_path: Optional[str] = None,
                   speaker: Optional[str] = None,
                   emotion: Optional[str] = None,
                   speed: Optional[float] = None,
                   pitch: Optional[float] = None) -> Optional[str]:
        """
        Synthesize speech for *text*.

        Args:
            text: Text to synthesize; must be non-blank.
            output_path: Output WAV path (auto-generated under ../tmp
                when None).
            speaker: Speaker key (defaults to the current speaker).
            emotion: Emotion key (defaults to the current emotion).
            speed: Speed multiplier; overrides the emotion preset.
            pitch: Pitch multiplier; overrides the emotion preset.

        Returns:
            Path of the generated audio file, or None on failure.
        """
        try:
            if not text.strip():
                self.logger.error("文本内容不能为空")
                return None

            # Use the explicit arguments, or fall back to current settings.
            current_speaker = speaker or self.current_speaker
            current_emotion = emotion or self.current_emotion

            # Validate keys up front so a typo yields a clear error
            # instead of a KeyError swallowed by the broad handler below.
            if current_speaker not in self.speakers:
                self.logger.error(f"未找到说话人: {current_speaker}")
                return None
            if current_emotion not in self.emotions:
                self.logger.error(f"未找到情感: {current_emotion}")
                return None

            # Explicit overrides win over the preset. Use ``is not None``
            # (not ``or``) so a falsy numeric override is not silently
            # replaced by the preset value.
            emotion_params = self.emotions[current_emotion]
            final_speed = speed if speed is not None else emotion_params['speed']
            final_pitch = pitch if pitch is not None else emotion_params['pitch']

            # Generate an output path when the caller did not supply one.
            if output_path is None:
                timestamp = int(time.time())
                output_path = f"../tmp/output_{timestamp}.wav"

            # Make sure the target directory exists (the default path
            # points into ../tmp, which may not have been created yet).
            Path(output_path).parent.mkdir(parents=True, exist_ok=True)

            self.logger.info(f"正在合成语音: {text[:50]}...")
            self.logger.info(f"说话人: {self.speakers[current_speaker]['name']}")
            self.logger.info(f"情感: {self.emotions[current_emotion]['name']}")

            # Run the actual synthesis.
            wav = self.tts_executor(
                text=text,
                output=output_path,
                am=self.current_model['am'],
                voc=self.current_model['voc'],
                lang=self.current_model['lang'],
                device=self.device,
                spk_id=self.speakers[current_speaker]['id']
            )

            # Apply speed/pitch post-processing when it would change anything.
            if final_speed != 1.0 or final_pitch != 1.0:
                self._apply_emotion_effects(output_path, final_speed, final_pitch)

            self.logger.info(f"语音合成完成: {output_path}")
            return output_path

        except Exception as e:
            self.logger.error(f"语音合成失败: {e}")
            return None

    def _apply_emotion_effects(self, audio_path: str, speed: float, pitch: float):
        """Apply speed and pitch adjustments to the WAV file in place.

        Best-effort: failures are logged as warnings, never raised.
        """
        try:
            audio = AudioSegment.from_wav(audio_path)

            if speed != 1.0:
                if speed > 1.0:
                    # pydub's native speedup for faster playback.
                    audio = audio.speedup(playback_speed=speed)
                else:
                    # Slow down by resampling the raw frames.
                    audio = audio._spawn(audio.raw_data, overrides={
                        "frame_rate": int(audio.frame_rate * speed)
                    }).set_frame_rate(audio.frame_rate)
                # Persist the speed change BEFORE any pitch processing.
                # (Bug fix: the old code reloaded the original file for
                # pitch shifting, discarding the speed adjustment.)
                audio.export(audio_path, format="wav")

            if pitch != 1.0:
                # Use librosa for an accurate pitch shift; convert the
                # multiplier to semitones (12 * log2(ratio)).
                y, sr = librosa.load(audio_path, sr=None)
                y_shifted = librosa.effects.pitch_shift(y, sr=sr, n_steps=12 * np.log2(pitch))
                sf.write(audio_path, y_shifted, sr)

        except Exception as e:
            self.logger.warning(f"应用情感效果失败: {e}")

    def batch_synthesize(self,
                         texts: List[str],
                         output_dir: str = "batch_output",
                         speaker: Optional[str] = None,
                         emotion: Optional[str] = None) -> List[str]:
        """
        Synthesize a list of texts into numbered files under *output_dir*.

        Args:
            texts: Texts to synthesize.
            output_dir: Output directory (created if missing).
            speaker: Speaker key for all items (optional).
            emotion: Emotion key for all items (optional).

        Returns:
            Paths of the successfully generated audio files.
        """
        # Create the output directory, including missing parents
        # (plain exist_ok=True would fail on a nested path).
        Path(output_dir).mkdir(parents=True, exist_ok=True)

        output_files = []
        total = len(texts)

        self.logger.info(f"开始批量合成 {total} 个文本")

        for i, text in enumerate(texts, 1):
            try:
                output_path = os.path.join(output_dir, f"batch_{i:03d}.wav")
                result = self.synthesize(
                    text=text,
                    output_path=output_path,
                    speaker=speaker,
                    emotion=emotion
                )

                if result:
                    output_files.append(result)
                    self.logger.info(f"进度: {i}/{total} - 已完成")
                else:
                    self.logger.error(f"进度: {i}/{total} - 合成失败")

            except Exception as e:
                self.logger.error(f"批量合成第 {i} 个文本时出错: {e}")

        self.logger.info(f"批量合成完成，成功生成 {len(output_files)} 个音频文件")
        return output_files

    def create_dialogue(self,
                        dialogue_data: List[Dict],
                        output_path: str = "dialogue.wav",
                        pause_duration: float = 0.5) -> Optional[str]:
        """
        Build a multi-speaker dialogue audio file.

        Args:
            dialogue_data: Dialogue items shaped like::

                [
                    {"speaker": "female_1", "emotion": "happy", "text": "你好！"},
                    {"speaker": "male_1", "emotion": "neutral", "text": "你好，很高兴见到你！"}
                ]
            output_path: Output file path.
            pause_duration: Silence between utterances, in seconds.

        Returns:
            Path of the combined dialogue audio, or None on failure.
        """
        temp_files = []
        try:
            self.logger.info(f"开始创建多角色对话，共 {len(dialogue_data)} 句")

            audio_segments = []

            for i, item in enumerate(dialogue_data):
                # Render each utterance to its own temporary file.
                temp_file = f"temp_dialogue_{i}.wav"
                temp_files.append(temp_file)

                result = self.synthesize(
                    text=item['text'],
                    output_path=temp_file,
                    speaker=item.get('speaker'),
                    emotion=item.get('emotion', 'neutral')
                )

                if result:
                    segment = AudioSegment.from_wav(temp_file)
                    audio_segments.append(segment)

                    # Insert a pause between utterances (not after the last).
                    if i < len(dialogue_data) - 1:
                        pause = AudioSegment.silent(duration=int(pause_duration * 1000))
                        audio_segments.append(pause)
                else:
                    self.logger.error(f"对话第 {i + 1} 句合成失败")

            if not audio_segments:
                self.logger.error("没有成功合成任何对话音频")
                return None

            # Concatenate all utterances and pauses into one track.
            final_audio = audio_segments[0]
            for segment in audio_segments[1:]:
                final_audio += segment

            final_audio.export(output_path, format="wav")

            self.logger.info(f"多角色对话创建完成: {output_path}")
            return output_path

        except Exception as e:
            self.logger.error(f"创建多角色对话失败: {e}")
            return None
        finally:
            # Remove per-utterance temp files on every exit path.
            # (Bug fix: the old code only cleaned up on full success,
            # leaking temp files on any failure.)
            for temp_file in temp_files:
                try:
                    os.remove(temp_file)
                except OSError:
                    pass

    def convert_text_file(self,
                          file_path: str,
                          output_path: Optional[str] = None,
                          encoding: str = 'utf-8') -> Optional[str]:
        """
        Convert a whole text file to speech.

        Args:
            file_path: Path of the input text file.
            output_path: Output WAV path (derived from the input name
                when None).
            encoding: Text encoding of the input file.

        Returns:
            Path of the generated audio file, or None on failure.
        """
        try:
            with open(file_path, 'r', encoding=encoding) as f:
                text = f.read().strip()

            if not text:
                self.logger.error("文本文件为空")
                return None

            # Derive "<input-stem>_audio.wav" when no path was supplied.
            if output_path is None:
                base_name = Path(file_path).stem
                output_path = f"{base_name}_audio.wav"

            return self.synthesize(text, output_path)

        except Exception as e:
            self.logger.error(f"转换文本文件失败: {e}")
            return None

    def get_model_info(self) -> Dict:
        """Return a snapshot of the current engine configuration."""
        return {
            'current_model': self.current_model,
            'current_speaker': self.current_speaker,
            'current_emotion': self.current_emotion,
            'device': self.device,
            'speakers_count': len(self.speakers),
            'emotions_count': len(self.emotions)
        }


def main():
    """Interactive command-line front end for :class:`AdvancedPaddleTTS`.

    Shows a numbered menu in a loop and dispatches each choice to the
    matching engine method until the user picks 0 to quit.
    """
    print("=" * 60)
    print("🎤 Python 3.10 + PaddleSpeech 高级文本转语音程序")
    print("=" * 60)

    # Bring up the engine before entering the menu loop.
    try:
        print("正在初始化 PaddleSpeech 引擎...")
        tts = AdvancedPaddleTTS(device="cpu")  # 可改为 "gpu"
        print("✅ 初始化完成!")
    except Exception as e:
        print(f"❌ 初始化失败: {e}")
        return

    menu_lines = (
        "请选择功能:",
        "1. 文本转语音",
        "2. 设置说话人",
        "3. 设置情感",
        "4. 批量处理",
        "5. 创建多角色对话",
        "6. 转换文本文件",
        "7. 查看当前设置",
        "8. 查看可用说话人",
        "9. 查看可用情感",
        "0. 退出程序",
    )

    while True:
        print("\n" + "=" * 40)
        for line in menu_lines:
            print(line)
        print("=" * 40)

        choice = input("请输入选择 (0-9): ").strip()

        if choice == '0':
            print("👋 感谢使用！再见！")
            break

        elif choice == '1':
            content = input("请输入要合成的文本: ").strip()
            if not content:
                print("❌ 文本不能为空")
            else:
                audio_file = tts.synthesize(content)
                if audio_file:
                    print(f"✅ 语音合成完成: {audio_file}")
                else:
                    print("❌ 语音合成失败")

        elif choice == '2':
            print("\n可用说话人:")
            for sid, meta in tts.get_available_speakers().items():
                print(f"  {sid}: {meta['name']} ({meta['gender']})")

            chosen = input("请输入说话人 ID: ").strip()
            if tts.set_speaker(chosen):
                print("✅ 说话人设置成功")
            else:
                print("❌ 说话人设置失败")

        elif choice == '3':
            print("\n可用情感:")
            for eid, meta in tts.get_available_emotions().items():
                print(f"  {eid}: {meta['name']}")

            chosen = input("请输入情感 ID: ").strip()
            if tts.set_emotion(chosen):
                print("✅ 情感设置成功")
            else:
                print("❌ 情感设置失败")

        elif choice == '4':
            print("批量处理模式")
            collected = []
            print("请输入要合成的文本（输入空行结束）:")
            # Collect lines until a blank entry terminates input.
            while entry := input(f"文本 {len(collected) + 1}: ").strip():
                collected.append(entry)

            if not collected:
                print("❌ 没有输入任何文本")
            else:
                target_dir = input("输出目录 (默认: batch_output): ").strip() or "batch_output"
                produced = tts.batch_synthesize(collected, target_dir)
                print(f"✅ 批量处理完成，生成 {len(produced)} 个音频文件")

        elif choice == '5':
            print("多角色对话模式")
            turns = []
            print("请输入对话内容（输入空行结束）:")
            # Each turn asks for speaker, emotion and text; a blank
            # speaker ends input, a blank text skips the turn.
            while True:
                who = input(f"对话 {len(turns) + 1} - 说话人: ").strip()
                if not who:
                    break
                mood = input(f"对话 {len(turns) + 1} - 情感 (默认: neutral): ").strip() or "neutral"
                line = input(f"对话 {len(turns) + 1} - 文本: ").strip()
                if line:
                    turns.append({"speaker": who, "emotion": mood, "text": line})

            if not turns:
                print("❌ 没有输入任何对话")
            else:
                target = input("输出文件 (默认: dialogue.wav): ").strip() or "dialogue.wav"
                made = tts.create_dialogue(turns, target)
                if made:
                    print(f"✅ 多角色对话创建完成: {made}")
                else:
                    print("❌ 多角色对话创建失败")

        elif choice == '6':
            source = input("请输入文本文件路径: ").strip()
            if not (source and os.path.exists(source)):
                print("❌ 文件不存在")
            else:
                converted = tts.convert_text_file(source)
                if converted:
                    print(f"✅ 文本文件转换完成: {converted}")
                else:
                    print("❌ 文本文件转换失败")

        elif choice == '7':
            state = tts.get_model_info()
            print("\n当前设置:")
            print(f"  设备: {state['device']}")
            print(f"  说话人: {state['current_speaker']}")
            print(f"  情感: {state['current_emotion']}")
            print(f"  可用说话人数量: {state['speakers_count']}")
            print(f"  可用情感数量: {state['emotions_count']}")

        elif choice == '8':
            print("\n可用说话人:")
            gender_icons = {'male': '👤', 'female': '👩'}
            for sid, meta in tts.get_available_speakers().items():
                marker = gender_icons.get(meta['gender'], '👶')
                print(f"  {marker} {sid}: {meta['name']} ({meta['gender']})")

        elif choice == '9':
            print("\n可用情感:")
            emotion_icons = {
                'neutral': '😐', 'happy': '😊', 'sad': '😢',
                'angry': '😠', 'gentle': '😌', 'excited': '🤩'
            }
            for eid, meta in tts.get_available_emotions().items():
                print(f"  {emotion_icons.get(eid, '🎭')} {eid}: {meta['name']}")

        else:
            print("❌ 无效选择，请重新输入")


# Run the interactive CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()