'''
FastAPI service:
提供实时和完整的语音合成服务，支持多种预设音色
(Provides real-time streaming and full-file speech synthesis with multiple preset voices.)
'''
import sys
import torch
import numpy as np
import uvicorn
import json
import re
from datetime import datetime
from pathlib import Path
from fastapi import FastAPI, HTTPException, Request, Query, WebSocket
from fastapi.responses import StreamingResponse, FileResponse
from pydub import AudioSegment
from typing import Optional, Dict, Any, List
from websockets.exceptions import ConnectionClosedOK
import librosa
from contextlib import asynccontextmanager

# 修改：添加全局锁以防止多线程冲突
import threading
voice_lock = threading.Lock()

# 解决路径问题
sys.path.append(str(Path(__file__).parent / "third_party/Matcha-TTS"))
from cosyvoice.cli.cosyvoice import CosyVoice2

# 全局配置
class Config:
    DEFAULT_VOICE = "ying"
    VOICE_DIR = Path("asset/voices")
    TEMP_DIR = Path("temp_audio")
    VOICE_DATA: Dict[str, Any] = {}
    VOICE_CACHE: Dict[str, Dict] = {}  # 音色数据缓存
    COSYVOICE_MODEL = None

# 音色类封装
class Voice:
    def __init__(self, name: str):
        self.name = name
        self.title = name  # 默认标题等于名称
        self.prompt_text = f"我是{name}，这是我的声音。"  # 默认提示文本
        self.audio_tensor = None
        self.file = None
        self.is_loaded = False

    def load_data(self, audio_file: Path):
        """加载音色数据（音频和文本）"""
        try:
            # 加载音频文件
            self.audio_tensor = load_mp3_as_16k(audio_file)
            self.file = str(audio_file)
            
            # 查找并解析文本文件
            text_path = audio_file.with_suffix(".txt")
            if text_path.exists():
                # 尝试用不同编码读取文本文件
                try:
                    content = text_path.read_text(encoding='utf-8', errors='ignore').strip()
                except:
                    try:
                        content = text_path.read_text(encoding='gbk', errors='ignore').strip()
                    except:
                        content = ""
                
                # 解析文本格式：第一行可能是"名称：内容"
                if content:
                    # 分割第一行和其他行
                    lines = content.split('\n', 1)
                    first_line = lines[0].strip()
                    
                    # 尝试解析名称：内容格式
                    match = re.match(r"(.+?) *[:：] *(.+)", first_line)
                    if match:
                        self.title = match.group(1).strip()
                        # 如果有其他行内容，合并为提示文本
                        if len(lines) > 1:
                            self.prompt_text = f"{match.group(2).strip()}\n{lines[1].strip()}"
                        else:
                            self.prompt_text = match.group(2).strip()
                    else:
                        # 没有名称：内容格式，整个文件为提示文本
                        self.prompt_text = content
            self.is_loaded = True
            return True
        except Exception as e:
            print(f"加载音色 '{self.name}' 失败: {str(e)}")
            self.prompt_text = f"我是{self.name}，这是我的声音。"
            return False

# 初始化配置
def init_config():
    """初始化配置，确保目录存在"""
    Config.VOICE_DIR.mkdir(parents=True, exist_ok=True)
    Config.TEMP_DIR.mkdir(exist_ok=True)
    
    # 自动清理过期临时文件（超过1小时）
    for file in Config.TEMP_DIR.glob("*.wav"):
        if file.stat().st_mtime < (datetime.now().timestamp() - 3600):
            try:
                file.unlink()
            except:
                pass
    
    # 缓存所有本地音色
    cache_available_voices()
    
    # 设置默认音色
    if Config.DEFAULT_VOICE in Config.VOICE_CACHE:
        set_voice(Config.DEFAULT_VOICE)
    
    return get_available_voices()

def cache_available_voices():
    """Scan VOICE_DIR for mp3/wav files and cache their metadata."""
    # Collect every candidate audio file.
    discovered = []
    for pattern in ("*.mp3", "*.wav"):
        discovered.extend(Config.VOICE_DIR.glob(pattern))

    # Register each file once, keyed by its stem.
    for path in discovered:
        stem = path.stem
        if stem in Config.VOICE_CACHE:
            continue  # already cached
        profile = Voice(stem)
        profile.load_data(path)
        Config.VOICE_CACHE[stem] = {
            "title": profile.title,
            "prompt_text": profile.prompt_text,
            "file": str(path),
        }

# 初始化语音合成模型
def init_model():
    """延迟初始化语音合成模型"""
    if Config.COSYVOICE_MODEL is None:
        print("正在初始化CosyVoice语音合成模型...")
        try:
            Config.COSYVOICE_MODEL = CosyVoice2(
                'pretrained_models/CosyVoice2-0.5B', 
                load_jit=False, 
                load_trt=False, 
                fp16=True
            )
        except Exception as e:
            print(f"模型初始化失败: {str(e)}")
            raise RuntimeError(f"语音合成模型初始化失败: {str(e)}")

# 返回 PyTorch 张量
def load_mp3_as_16k(file_path, target_sr=16000, mono=True) -> torch.Tensor:
    """加载MP3文件并转换为目标采样率的numpy数组"""
    file_path = Path(file_path)
    if not file_path.exists():
        raise FileNotFoundError(f"音频文件不存在: {file_path}")
    
    audio = AudioSegment.from_file(str(file_path))
    if mono and audio.channels > 1:
        audio = audio.set_channels(1)
    
    samples = np.array(audio.get_array_of_samples())
    max_value = 2**(audio.sample_width * 8 - 1)
    samples = samples.astype(np.float32) / max_value
    
    if audio.frame_rate != target_sr:
        samples = librosa.resample(samples, orig_sr=audio.frame_rate, target_sr=target_sr)
    
    return torch.from_numpy(samples).float().unsqueeze(0)

# 获取所有可用音色
def get_available_voices() -> List[Dict[str, str]]:
    """获取所有可用的音色选项"""
    voices = []
    
    for voice_name, data in Config.VOICE_CACHE.items():
        voices.append({
            "name": voice_name,
            "title": data["title"],
            "file": data["file"],
            "type": Path(data["file"]).suffix[1:]
        })
    
    return voices

# 设置当前使用的音色
def set_voice(voice_name: str, set_as_current: bool = True):
    """设置并加载音色配置"""
    if not voice_name:
        voice_name = Config.DEFAULT_VOICE
    
    if voice_name not in Config.VOICE_CACHE:
        raise ValueError(f"音色 '{voice_name}' 不存在或未缓存")
    
    # 初始化模型（如果需要）
    if Config.COSYVOICE_MODEL is None:
        init_model()
    
    # 加锁确保线程安全
    with voice_lock:
        # 检查是否已加载
        if voice_name in Config.VOICE_DATA:
            voice_data = Config.VOICE_DATA[voice_name]
        else:
            # 从缓存中获取音色数据
            voice_cache = Config.VOICE_CACHE[voice_name]
            
            # 加载音频数据
            try:
                audio_tensor = load_mp3_as_16k(voice_cache["file"])
            except Exception as e:
                raise RuntimeError(f"无法加载音色音频: {str(e)}")
            
            # 存储音色数据
            voice_data = {
                "audio": audio_tensor,
                "text": voice_cache["prompt_text"],
                "file": voice_cache["file"],
                "title": voice_cache["title"]
            }
            Config.VOICE_DATA[voice_name] = voice_data
        
        # 设置当前音色
        if set_as_current:
            Config.VOICE_DATA["current_voice"] = voice_name
    
    return voice_data

# 流式合成音频生成器
def audio_stream_generator(text: str, voice_name: Optional[str] = None):
    """流式生成语音音频数据"""
    # 确保模型初始化
    if Config.COSYVOICE_MODEL is None:
        init_model()
    
    # 确定使用的音色
    try:
        if voice_name is None:
            voice_name = Config.VOICE_DATA.get("current_voice", Config.DEFAULT_VOICE)
        
        # 获取音色数据
        if voice_name in Config.VOICE_DATA:
            voice_data = Config.VOICE_DATA[voice_name]
        else:
            voice_data = set_voice(voice_name, set_as_current=False)
    except Exception as e:
        # 回退到默认音色
        print(f"音色设置失败: {str(e)}，使用默认音色")
        voice_data = Config.VOICE_DATA.get(Config.DEFAULT_VOICE)
        if not voice_data:
            raise RuntimeError("无法加载任何音色数据")
    
    # 验证文本
    if not text.strip():
        raise ValueError("合成文本不能为空")
    
    # 使用模型生成音频
    generator = Config.COSYVOICE_MODEL.inference_zero_shot(
        text, 
        voice_data["text"], 
        voice_data["audio"], 
        stream=True
    )
    
    for chunk in generator:
        audio_tensor = chunk['tts_speech'].squeeze().cpu()
        audio_data = audio_tensor.numpy().astype(np.float32)
        yield audio_data.tobytes()

# 生成完整音频文件
def generate_full_audio(text: str, voice_name: Optional[str] = None) -> Path:
    """生成完整的语音文件"""
    if Config.COSYVOICE_MODEL is None:
        init_model()
    
    # 确定使用的音色
    if voice_name is None or voice_name not in Config.VOICE_DATA:
        if "current_voice" not in Config.VOICE_DATA:
            set_voice(Config.DEFAULT_VOICE)
        voice_name = Config.VOICE_DATA.get("current_voice", Config.DEFAULT_VOICE)
    
    voice_data = Config.VOICE_DATA.get(voice_name)
    if voice_data is None:
        set_voice(Config.DEFAULT_VOICE)
        voice_data = Config.VOICE_DATA[Config.DEFAULT_VOICE]
    
    # 使用模型的非流式接口生成完整音频
    result = Config.COSYVOICE_MODEL.inference_zero_shot(
        text, 
        voice_data["text"], 
        voice_data["audio"], 
        stream=False
    )
    audio_tensor = result['tts_speech'].squeeze().cpu()
    audio_data = audio_tensor.numpy().astype(np.float32)
    
    # 转换为Pydub音频对象
    audio_segment = AudioSegment(
        audio_data.tobytes(),
        frame_rate=16000,
        sample_width=4,  # 32位浮点数=4字节
        channels=1
    )
    
    # 生成唯一文件名
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"{voice_name}_{timestamp}.wav"
    output_path = Config.TEMP_DIR / filename
    
    # 保存音频
    audio_segment.export(str(output_path), format="wav")
    
    return output_path

# --------------------------- FastAPI 应用初始化 ---------------------------
@asynccontextmanager
async def lifespan(app: FastAPI):
    """应用生命周期管理"""
    voices = init_config()
    print("可用的音色:")
    for voice in voices:
        print(f" - {voice['title']} (ID: {voice['name']}): {voice['file']}")
    
    try:
        if voices:
            print(f"已设置默认音色: {Config.VOICE_DATA['current_voice']}")
        else:
            print("警告：没有找到任何音色文件！")
    except Exception as e:
        print(f"无法加载默认音色: {str(e)}")
    
    print("服务启动完成，等待请求...")
    yield
    
    # 应用关闭时的清理工作
    print("关闭服务...")
    if Config.COSYVOICE_MODEL:
        # 如果需要，添加模型清理代码
        pass

# FastAPI application; `lifespan` caches voices at startup (see above).
app = FastAPI(
    title="Voice Synthesis API",
    description="提供实时和完整的语音合成服务，支持多种预设音色",
    lifespan=lifespan
)

# --------------------------- API 接口实现 ---------------------------
@app.get("/voices", summary="获取可用音色列表")
def get_voices():
    """获取所有可用的音色选项"""
    return {
        "default_voice": Config.DEFAULT_VOICE,
        "current_voice": Config.VOICE_DATA.get("current_voice", Config.DEFAULT_VOICE),
        "voices": get_available_voices()
    }

@app.post("/voices/set", summary="设置当前使用的音色")
async def set_current_voice(voice_name: str = Query(..., description="要设置的音色名称")):
    """Switch the server-wide current voice; 400 on any failure."""
    try:
        set_voice(voice_name)
        title = Config.VOICE_DATA[voice_name]["title"]
    except Exception as e:
        raise HTTPException(400, detail=str(e))
    return {"success": True, "current_voice": voice_name, "title": title}

@app.post("/stream", summary="流式语音合成")
async def stream_audio(
    request: Request,
    voice: Optional[str] = Query(None, description="指定音色名称（可选）")
):
    """
    实时语音流API - 输入文本返回实时音频流
    
    音频参数: 
        - 格式: PCM原始音频流
        - 采样率: 16000 Hz
        - 声道: 单声道
        - 格式: 32位浮点数
    """
    try:
        # The raw request body is the text to synthesize.
        request_body = await request.body()
        text = request_body.decode('utf-8')

        if not text.strip():
            raise ValueError("文本内容不能为空")

        # Resolve the voice name. Fix: the original wrapped this in a bare
        # try/except, but dict.get cannot raise — the guard was dead code.
        voice_name = voice or Config.VOICE_DATA.get("current_voice", Config.DEFAULT_VOICE)

        # Response headers describing the raw PCM stream.
        headers = {
            "Content-Type": "audio/pcm",
            "Sample-Rate": "16000",
            "Channels": "1",
            "Sample-Format": "float32",
            "X-Voice-Used": voice_name
        }

        return StreamingResponse(
            audio_stream_generator(text, voice_name),
            media_type="audio/pcm",
            headers=headers
        )

    except Exception as e:
        import traceback
        traceback.print_exc()  # log the full traceback for debugging
        raise HTTPException(400, detail=str(e))

@app.post("/synthesize", summary="完整语音合成")
async def full_audio(
    request: Request,
    voice: Optional[str] = Query(None, description="指定音色名称（可选）"),
    filename: Optional[str] = Query(None, description="自定义输出文件名（可选）")
):
    """生成完整语音文件，返回WAV格式下载"""
    try:
        body = await request.body()
        text = body.decode('utf-8')
        if not text.strip():
            raise ValueError("文本内容不能为空")

        output_path = generate_full_audio(text, voice)

        # Build the download name: explicit filename, else "<title>_<time>.wav".
        voice_name = voice or Config.VOICE_DATA.get("current_voice", Config.DEFAULT_VOICE)
        voice_title = Config.VOICE_DATA.get(voice_name, {}).get("title", voice_name)
        if filename:
            download_filename = filename
        else:
            download_filename = f"{voice_title}_{datetime.now().strftime('%H%M%S')}.wav"
        if not download_filename.endswith(".wav"):
            download_filename += ".wav"

        return FileResponse(
            path=output_path,
            media_type="audio/wav",
            filename=download_filename,
            headers={"X-Voice-Used": voice_name},
        )
    except Exception as e:
        raise HTTPException(400, detail=str(e))

@app.websocket("/realtime", name="实时语音流")
async def realtime_audio(websocket: WebSocket):
    """实时语音流API - WebSocket-based real-time two-way communication.

    Protocol (text frames in, binary audio frames out):
    - first frame: optional JSON config, e.g. {"voice": "name"}
    - "SET_VOICE:<name>" switches voice mid-session
    - "CLOSE" ends the session
    - any other text is synthesized; float32 PCM chunks are streamed back
      followed by an "END_OF_AUDIO" text frame.

    Fixes over the original:
    - `voice_data` is refreshed after a successful SET_VOICE; previously
      synthesis kept using the OLD voice while reporting the new one.
    - Guards against `voice_data` being None (previously crashed with an
      AttributeError when no voice had been loaded yet).
    """
    await websocket.accept()
    init_model()

    try:
        # First frame: optional JSON config selecting the voice.
        config = await websocket.receive_text()
        try:
            config_data = json.loads(config)
            voice_name = config_data.get("voice")
            if voice_name:
                set_voice(voice_name)
        except Exception:
            # Malformed config: keep the current/default voice.
            pass

        current_voice = Config.VOICE_DATA.get("current_voice", Config.DEFAULT_VOICE)
        voice_data = Config.VOICE_DATA.get(current_voice)
        if voice_data is None:
            # Nothing loaded yet — load the default voice on demand.
            voice_data = set_voice(Config.DEFAULT_VOICE)
            current_voice = Config.VOICE_DATA.get("current_voice", Config.DEFAULT_VOICE)
        voice_title = voice_data.get("title", current_voice)

        await websocket.send_text(f"READY|Voice:{voice_title}")

        while True:
            message = await websocket.receive_text()
            if message == "CLOSE":
                break

            # Mid-session voice switch.
            if message.startswith("SET_VOICE:"):
                try:
                    voice_name = message.split(":", 1)[1].strip()
                    set_voice(voice_name)
                    current_voice = Config.VOICE_DATA["current_voice"]
                    # Refresh the data actually used for synthesis.
                    voice_data = Config.VOICE_DATA[current_voice]
                    voice_title = voice_data.get("title", current_voice)
                    await websocket.send_text(f"VOICE_SET:{voice_title}")
                except Exception as e:
                    await websocket.send_text(f"ERROR:{str(e)}")
                continue

            # Synthesize the message and stream the chunks back.
            generator = Config.COSYVOICE_MODEL.inference_zero_shot(
                message,
                voice_data["text"],
                voice_data["audio"],
                stream=True
            )

            for chunk in generator:
                audio_tensor = chunk['tts_speech'].squeeze().cpu()
                await websocket.send_bytes(
                    audio_tensor.numpy().astype(np.float32).tobytes()
                )

            # Signal end of this utterance.
            await websocket.send_text("END_OF_AUDIO")

    except ConnectionClosedOK:
        pass
    except Exception as e:
        try:
            await websocket.send_text(f"ERROR:{str(e)}")
            await websocket.close()
        except Exception:
            pass
    finally:
        try:
            await websocket.close()
        except Exception:
            pass

if __name__ == "__main__":
    # Development entry point; reload=True restarts the server on code changes.
    uvicorn.run("voice_api2:app", host="0.0.0.0", port=8000, reload=True)