"""ChatTTS 客户端
使用ChatTTS源码进行语音合成
"""

import asyncio
import json
import os
import tempfile
import uuid
import re
from typing import Dict, Any, Optional, List, AsyncGenerator, Tuple
from pathlib import Path
import sys
import os
import logging
import numpy as np
from scipy.io import wavfile
import time

# 添加ChatTTS模块路径
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "ChatTTS"))
from ChatTTS import Chat

import torch

# Simple fallback configuration (used when no external settings object is injected)
class Settings:
    """Minimal fallback configuration object."""

    def __init__(self):
        # Default ChatTTS model directory: <this file's directory>/asset
        base_dir = os.path.dirname(__file__)
        self.CHATTTS_MODEL_DIR = os.path.join(base_dir, "asset")

class ChatTTSClient:
    """ChatTTS 客户端 - 使用源码"""
    
    def __init__(self, settings=None):
        """初始化ChatTTS客户端
        
        Args:
            settings: 应用配置
        """
        self.settings = settings or Settings()
        
        # 设置日志
        self.logger = logging.getLogger("chattts")
        self.logger.setLevel(logging.INFO)
        
        # 创建ChatTTS实例
        try:
            self.chat = Chat()
            self.logger.info("ChatTTS实例创建成功")
        except Exception as e:
            self.logger.error(f"创建ChatTTS实例失败: {e}")
            self.chat = None
        
        # 创建输出目录
        self.output_dir = Path("d:/pythonProject/AI_voice/output/chattts_audio")
        self.output_dir.mkdir(exist_ok=True, parents=True)
        
    def clean_text(self, text: str) -> str:
        """清理文本，删除*号、多余空格等不必要的符号
        
        Args:
            text: 原始文本
            
        Returns:
            清理后的文本
        """
        # 删除*号
        text = text.replace('*', '')
        
        # 删除多余空格
        text = re.sub(r'\s+', ' ', text)
        text = text.strip()
        
        # 删除其他可能影响语音生成的特殊符号
        text = re.sub(r'[#~`]+', '', text)
        
        # 删除连续的标点符号
        text = re.sub(r'([,.!?;:，。！？；：])\1+', r'\1', text)
        
        return text
        
        """将文本分割成句子
        
        Args:
            text: 原始文本
            
        Returns:
            句子列表
        """
        # 使用常见的中英文句子终止符分割文本
        sentence_endings = r'(?<=[.。!！?？;；])\s*'
        sentences = re.split(sentence_endings, text)
        
        # 过滤空句子
        sentences = [s.strip() for s in sentences if s.strip()]
        
        return sentences
    
    async def generate_speech(self, 
                            text: str, 
                            voice: str = "100",
                            emotion: Optional[str] = None,
                            speed: float = 1.0,
                            pitch: float = 0.0,
                            format: str = "mp3") -> str:
        """生成语音
        
        Args:
            text: 要转换为语音的文本
            voice: 语音角色ID
            emotion: 情感（如happy, sad, angry等）
            speed: 语速（0.5-2.0）
            pitch: 音调调整（-10.0-10.0）
            format: 音频格式（mp3, wav, ogg等）
            
        Returns:
            生成的音频文件路径
        """
        try:
            if self.chat is None:
                raise ValueError("ChatTTS实例创建失败")
            
            # 检查模型是否已加载
            if not self.chat.has_loaded(use_decoder=True):
                self.logger.info("模型未加载，正在加载模型...")
                # 直接调用load()方法，不传递model_dir参数
                self.chat.load()
                
            # 生成唯一的输出文件路径，使用wav格式
            # filename = f"{uuid.uuid4()}_{voice}_{int(speed*10)}_{int(pitch*10)}.wav"
            # output_path = self.output_dir / filename
            # output_path = str(output_path)
            
            # 使用ChatTTS生成语音
            self.logger.info(f"正在使用ChatTTS生成语音，文本长度: {len(text)}")
            
            # 设置语音角色
            speaker_id = int(voice) if voice.isdigit() else 0
            
            # 生成语音
            wav = await asyncio.to_thread(
                self.chat.infer,
                text=text,
                voice_id=voice,  # 添加音色参数
                use_decoder=True
            )
            
            # 保存音频文件
            # 检查wav是否为列表，如果是，取第一个元素
            if isinstance(wav, list):
                wav_data = wav[0]
            else:
                wav_data = wav
                
            # 转换为numpy数组
            if isinstance(wav_data, torch.Tensor):
                wav_data = wav_data.numpy()
                
            # 确保是浮点数据
            if wav_data.dtype != np.float32:
                wav_data = wav_data.astype(np.float32)
                
            # 归一化到[-1, 1]范围
            if wav_data.max() > 1.0 or wav_data.min() < -1.0:
                wav_data = wav_data / max(abs(wav_data.max()), abs(wav_data.min()))
                
            # 转换为16位整数
            wav_data_int16 = (wav_data * 32767).astype(np.int16)
            
            # 使用scipy保存音频
            wavfile.write(output_path, 24000, wav_data_int16)
            self.logger.info(f"语音生成成功，保存到: {output_path}")
            
            return output_path
        except Exception as e:
            self.logger.error(f"语音生成失败: {e}")
            raise
    
    async def generate_speech_streaming(self, 
                                      text: str, 
                                      voice: str = "100",
                                      chunk_size: int = 4800,  # 0.2秒的音频
                                      format: str = "wav") -> AsyncGenerator[bytes, None]:
        """流式生成语音
        
        Args:
            text: 要转换为语音的文本
            voice: 语音角色ID
            chunk_size: 每个音频块的大小（采样点数）
            format: 音频格式（目前仅支持wav）
            
        Yields:
            音频数据块
        """
        try:
            if self.chat is None:
                raise ValueError("ChatTTS实例创建失败")
            
            # 检查模型是否已加载
            if not self.chat.has_loaded(use_decoder=True):
                self.logger.info("模型未加载，正在加载模型...")
                self.chat.load()
            
            # 使用ChatTTS生成语音
            self.logger.info(f"正在流式生成语音，文本长度: {len(text)}")
            
            # 生成语音
            wav = await asyncio.to_thread(
                self.chat.infer,
                text=text,
                use_decoder=True
            )
            
            # 处理音频数据
            if isinstance(wav, list):
                wav_data = wav[0]
            else:
                wav_data = wav
                
            # 转换为numpy数组
            if isinstance(wav_data, torch.Tensor):
                wav_data = wav_data.numpy()
                
            # 确保是浮点数据
            if wav_data.dtype != np.float32:
                wav_data = wav_data.astype(np.float32)
                
            # 归一化到[-1, 1]范围
            if wav_data.max() > 1.0 or wav_data.min() < -1.0:
                wav_data = wav_data / max(abs(wav_data.max()), abs(wav_data.min()))
                
            # 转换为16位整数
            wav_data_int16 = (wav_data * 32767).astype(np.int16)
            
            # 分块处理
            total_samples = len(wav_data_int16)
            sample_rate = 24000
            
            # 创建WAV文件头
            wav_header = self._create_wav_header(total_samples, sample_rate)
            yield wav_header
            
            # 分块发送音频数据
            for i in range(0, total_samples, chunk_size):
                chunk = wav_data_int16[i:i+chunk_size]
                yield chunk.tobytes()
                # 模拟流式处理的延迟
                await asyncio.sleep(0.1)  # 每个块之间的延迟
                
            self.logger.info(f"流式语音生成完成，总长度: {total_samples/sample_rate:.2f}秒")
            
        except Exception as e:
            self.logger.error(f"流式语音生成失败: {e}")
            raise
    
    def _create_wav_header(self, data_length, sample_rate=24000, channels=1, bits_per_sample=16):
        """创建WAV文件头
        
        Args:
            data_length: 音频数据长度（采样点数）
            sample_rate: 采样率
            channels: 通道数
            bits_per_sample: 每个采样点的位数
            
        Returns:
            WAV文件头的字节数据
        """
        # 计算数据大小（字节）
        data_size = data_length * channels * (bits_per_sample // 8)
        
        # RIFF头
        riff = b'RIFF'
        file_size = data_size + 36  # 文件大小 = 数据大小 + 头部大小(36)
        wave = b'WAVE'
        
        # fmt子块
        fmt = b'fmt '
        fmt_size = 16  # PCM格式的fmt子块大小为16
        audio_format = 1  # PCM = 1
        block_align = channels * (bits_per_sample // 8)
        byte_rate = sample_rate * block_align
        
        # data子块
        data_id = b'data'
        
        # 构建头部
        header = (
            riff +
            file_size.to_bytes(4, 'little') +
            wave +
            fmt +
            fmt_size.to_bytes(4, 'little') +
            audio_format.to_bytes(2, 'little') +
            channels.to_bytes(2, 'little') +
            sample_rate.to_bytes(4, 'little') +
            byte_rate.to_bytes(4, 'little') +
            block_align.to_bytes(2, 'little') +
            bits_per_sample.to_bytes(2, 'little') +
            data_id +
            data_size.to_bytes(4, 'little')
        )
        
        return header
              
    async def close(self):
        """关闭客户端"""
        self.logger.info("关闭ChatTTS客户端")
    