#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
语音合成引擎
负责文本到语音的转换过程
"""

import json
import logging
import os
import re
import subprocess
import sys
from pathlib import Path

import nltk
import numpy as np
import soundfile as sf
import torch
from g2p_en import G2p
from nltk.tokenize import sent_tokenize, TweetTokenizer
from tqdm import tqdm
try:
    from nemo_text_processing.text_normalization.normalize import Normalizer
except ImportError:
    # 如果nemo_text_processing不可用，提供一个最小替代
    class Normalizer:
        def __init__(self, input_case='cased', lang='en'):
            self.input_case = input_case
            self.lang = lang
        
        def normalize(self, text, verbose=False):
            # 最小的文本归一化实现
            return text  # 直接返回原文本，保持API兼容性
from transformers import AutoTokenizer

# Import project modules
sys.path.append(str(Path(__file__).resolve().parent.parent))
from config.paths import AUDIO_OUTPUT_FILE, TORCH_MODELS_DIR, ASSETS_DIR
from core.model_manager import ModelManager

# Configure logging
logger = logging.getLogger("TTS引擎")

# Helper functions ported from the original api.py
def split_arpabet(text):
    splits = re.finditer(r"{{(([^}][^}]?|[^}]}?)*)}}", text)
    out = []
    start = 0
    for split in splits:
        non_arpa = text[start:split.start()]
        arpa = text[split.start():split.end()]
        out = out + [non_arpa] + [arpa]
        start = split.end()
    if start < len(text):
        out.append(text[start:])
    return out

def is_arpabet(text):
    if len(text) < 4:
        return False
    return text[:2] == "{{" and text[-2:] == "}}" 

def is_context(text):
    if len(text) < 4:
        return False
    return text[:2] == "[[" and text[-2:] == "]]" 

def get_sentences(text):
    sentences = sent_tokenize(text)
    # ["What is this?", "?"] => ["What is this??"]
    merged_sentences = []
    for i, sentence in enumerate(sentences):
        if sentence in [".", "?", "!"]:
            continue
        for next_sentence in sentences[i + 1:]:
            if next_sentence in [".", "?", "!"]:
                sentence = sentence + next_sentence
            else:
                break
        merged_sentences.append(sentence)
    return merged_sentences

class DeepPoniesTTS():
    """End-to-end text-to-speech pipeline.

    Loads three TorchScript models (acoustic model, style predictor,
    vocoder) plus supporting assets (speaker map, symbol map, lexicon)
    and converts English text into a waveform via synthesize().
    """

    def __init__(self):
        # g2p_en grapheme-to-phoneme fallback for words missing from the lexicon.
        self.g2p = G2p()
        # Pre-exported TorchScript models loaded from the models directory.
        self.acoustic_model = torch.jit.load(TORCH_MODELS_DIR / "acoustic_model.pt")
        self.style_predictor = torch.jit.load(TORCH_MODELS_DIR / "style_predictor.pt")        
        self.vocoder = torch.jit.load(TORCH_MODELS_DIR / "vocoder.pt")
        # BERT-tiny tokenizer produces the input for the style predictor.
        self.tokenizer = AutoTokenizer.from_pretrained("prajjwal1/bert-tiny")
        self.normalizer = Normalizer(input_case='cased', lang='en')
        self.speaker2id = self.get_speaker2id()
        self.symbol2id = self.get_symbol2id()
        self.lexicon = self.get_lexicon()
        self.word_tokenizer = TweetTokenizer()
        # Inference only: switch all models out of training mode.
        self.acoustic_model.eval()
        self.style_predictor.eval()
        self.vocoder.eval()

    def get_speaker2id(self):
        """Return {speaker_name: speaker_id} for every activated speaker
        listed in assets/speakerCategories.json."""
        speaker2id = {}
        with open(ASSETS_DIR / "speakerCategories.json", "r") as json_file:
            data = json.load(json_file)
        for category in data.keys():
            for item in data[category]["items"]:
                # Deactivated speakers are excluded from the map entirely.
                if not item["activated"]:
                    continue
                speaker2id[item["speaker"]] = item["speaker_id"]
        return speaker2id

    def get_symbol2id(self):
        """Return the phone-symbol -> model-input-id map from symbol2id.json."""
        with open(ASSETS_DIR / "symbol2id.json", "r") as json_file:
            symbol2id = json.load(json_file)
        return symbol2id

    def get_lexicon(self):
        """Return {word: [phones]} parsed from lexicon.txt.

        Each line is expected to be a word followed by its space-separated
        phone sequence.
        """
        dic = {}
        with open(ASSETS_DIR / "lexicon.txt", "r") as f:
            lines = f.readlines()
        for line in lines:
            split = line.rstrip().split(" ")
            text = split[0].strip()
            phones = split[1:]
            dic[text] = phones
        return dic

    def synthesize(self, text: str, speaker_name: str, duration_control: float=1.0, verbose: bool=True) -> np.ndarray:
        """Convert *text* to a waveform for the given speaker.

        Args:
            text: Input text; may embed ``{{ARPAbet}}`` spans. It is split
                into sentences, each synthesized separately and concatenated.
            speaker_name: Key into self.speaker2id.
            duration_control: Phoneme-duration multiplier (>1.0 = slower speech).
            verbose: If True, show a tqdm progress bar over sentences.

        Returns:
            The full waveform as a 1-D numpy array.
        """
        waves = []
        text = text.strip()
        speaker_ids = torch.LongTensor([self.speaker2id[speaker_name]]) 
        # Guarantee terminal punctuation so sentence splitting behaves.
        if text[-1] not in [".", "?", "!"]:
            text = text + "."

        sentences = get_sentences(text)
        if verbose:
            sentences = tqdm(sentences)
        for sentence in sentences:
            phone_ids = []
            subsentences_style = []
            # Alternate between raw {{ARPAbet}} spans and plain-text segments.
            for subsentence in split_arpabet(sentence):
                if is_arpabet(subsentence):
                    # Use the user-supplied phones directly (strip the braces);
                    # unknown phone symbols are silently dropped.
                    for phone in subsentence.strip()[2:-2].split(" "):
                        if "@" + phone in self.symbol2id:
                            phone_ids.append(self.symbol2id["@" + phone])
                else:
                    # Plain text contributes both phones and style context.
                    subsentences_style.append(subsentence)
                    subsentence = self.normalizer.normalize(subsentence, verbose=False)
                    for word in self.word_tokenizer.tokenize(subsentence):
                        word = word.lower()
                        if word in [".", "?", "!"]:
                            phone_ids.append(self.symbol2id[word])
                        elif word in [",", ";"]:
                            # Commas/semicolons map to an explicit silence token.
                            phone_ids.append(self.symbol2id["@SILENCE"])
                        elif word in self.lexicon:
                            for phone in self.lexicon[word]:
                                phone_ids.append(self.symbol2id["@" + phone])
                            phone_ids.append(self.symbol2id["@BLANK"])
                        else:
                            # Out-of-lexicon words fall back to g2p prediction.
                            for phone in self.g2p(word):
                                phone_ids.append(self.symbol2id["@" + phone])
                            phone_ids.append(self.symbol2id["@BLANK"])
            
            # Style is predicted from the plain-text portion only (ARPAbet
            # spans are excluded from the style input).
            subsentence_style = " ".join(subsentences_style)
            encoding = self.tokenizer(
                subsentence_style,
                add_special_tokens=True,
                padding=True, 
                return_tensors="pt"
            )
            input_ids = encoding["input_ids"]
            attention_mask = encoding["attention_mask"]
            phone_ids = torch.LongTensor([phone_ids])
            with torch.no_grad():
                style = self.style_predictor(input_ids, attention_mask)
                mels = self.acoustic_model(
                    phone_ids,
                    speaker_ids,
                    style,
                    1.0,
                    duration_control
                )
                wave = self.vocoder(mels, speaker_ids, torch.FloatTensor([1.0]))
                waves.append(wave.view(-1))
        full_wave = torch.cat(waves, dim=0).cpu().numpy()
        return full_wave

class TTSEngine:
    """Speech synthesis engine.

    Thin wrapper around DeepPoniesTTS that handles model management,
    saving the generated waveform to disk, and playing it back.
    """

    def __init__(self, device=None, ui_callback=None):
        """
        Initialize the TTS engine.

        Args:
            device: Execution device (None = auto-select). Currently unused;
                kept for interface compatibility.
            ui_callback: UI callback used to surface download status/errors.
        """
        # Model manager surfaces download status/errors to the UI.
        self.model_manager = ModelManager(ui_callback=ui_callback)

        # The actual synthesis pipeline.
        self.tts = DeepPoniesTTS()

        # Audio parameters.
        self.sample_rate = 22050
        self.max_wav_length = 120 * self.sample_rate  # cap at 120 seconds

    def synthesize(self, text, speaker_name, speed=1.0, play=True):
        """
        Synthesize speech.

        Args:
            text: Input text.
            speaker_name: Speaker/character name.
            speed: Speed factor (0.5-2.0).
            play: Whether to play the synthesized audio.

        Returns:
            tuple: (audio data, saved file path), or (None, None) on error.
        """
        try:
            # Speed maps inversely onto the model's duration control.
            duration_control = 1.0 / speed
            logger.info(f"使用角色: {speaker_name} 合成文本: '{text}'")

            audio = self.tts.synthesize(text, speaker_name, duration_control=duration_control, verbose=True)

            # Ensure the output directory exists. Guard against an empty
            # dirname (output file in the current directory), which would
            # make os.makedirs raise.
            out_dir = os.path.dirname(str(AUDIO_OUTPUT_FILE))
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)

            # Save the audio file.
            sf.write(AUDIO_OUTPUT_FILE, audio, self.sample_rate)
            logger.info(f"音频已保存: {AUDIO_OUTPUT_FILE}")

            # Play the audio if requested.
            if play:
                self.play_audio(AUDIO_OUTPUT_FILE)

            return audio, AUDIO_OUTPUT_FILE

        except Exception as e:
            logger.error(f"合成语音时出错: {str(e)}")
            return None, None

    def play_audio(self, file_path):
        """
        Play an audio file using the platform's player.

        Args:
            file_path: Path to the audio file.
        """
        try:
            path = str(file_path)
            if sys.platform == "win32":
                # os.startfile launches with the associated app and avoids the
                # shell-quoting problems of os.system('start ...') on paths
                # containing spaces.
                os.startfile(path)
            elif sys.platform == "darwin":  # macOS
                # Argument list with shell=False: no shell injection, and
                # paths with spaces work unquoted.
                subprocess.run(["afplay", path], check=False)
            else:  # Linux
                subprocess.run(["aplay", path], check=False)

            logger.info(f"正在播放: {file_path}")
        except Exception as e:
            logger.error(f"播放音频时出错: {str(e)}")

    def unload_models(self):
        """Unload models so the Python GC can reclaim their memory."""
        # Drop the reference; GC handles the rest.
        self.tts = None
        # Only touch the CUDA allocator when CUDA is actually usable.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        logger.info("已卸载TTS模型")

    def __del__(self):
        """Unload models on destruction.

        Must never raise: during interpreter shutdown module globals such as
        torch or logger may already be torn down.
        """
        try:
            self.unload_models()
        except Exception:
            pass

# Example usage
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    
    engine = TTSEngine()
    # NOTE(review): the demo text is Chinese, but the pipeline is built for
    # English (g2p_en, English lexicon) — confirm this example is intentional.
    engine.synthesize(
        "你好，我是暮光闪闪，很高兴认识你！",
        "Twilight Sparkle",
        speed=1.0
    ) 