# 服务端：voice_api.py
# Standard library
import random
import threading
from contextlib import asynccontextmanager
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, Any, List

# Third-party
import numpy as np
import torch
import uvicorn
from fastapi import FastAPI, HTTPException, Request, Query, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from websockets import ConnectionClosed

# Local
from audio_manager import Voice, official_audio_processing, create_opus_encoder, float32_pcm_to_opus, float32_pcm_to_wav
from audio_model import AudioModel
from opus_encode import generate_ogg_opus_stream

'''
api 接口
复刻语音在6s以内，过长输出会卡顿
实时流输出格式：opus | pcm
'''

# Thread-safety lock.
# NOTE(review): voice_lock is never acquired anywhere in this file — confirm it
# is used by other modules before relying on it for synchronization.
voice_lock = threading.Lock()
# Single shared Opus encoder reused by all streams.
# NOTE(review): shared mutable encoder state across concurrent requests — confirm
# the encoder is safe to use from multiple requests at once.
opus_encoder = create_opus_encoder()

# Global configuration
class Config:
    """Module-wide settings and the in-memory voice cache."""
    DEFAULT_VOICE = "ying"  # fallback voice name when the requested one is unknown
    VOICE_DIR = Path(__file__).parent / "asset/voices"  # reference voice audio files
    TEMP_DIR = Path(__file__).parent / "temp_audio"  # transient WAV output, purged hourly
    VOICE_CACHE: Dict[str, Any] = {}  # voice name -> {"audio", "title", "text", "file"}
    OUTPUT_SAMPLE_RATE = 24000  # synthesis output is 24 kHz PCM

# Model initialization
def init_model():
    """Create the global AudioModel and load the voice cache.

    Seeds torch, constructs the synthesis model, then prepares directories
    and prints the voices that were found.

    Raises:
        RuntimeError: if any initialization step fails; the original
            exception is chained via ``from`` so the root cause survives.
    """
    global audioModel
    print("初始化语音合成模型...")
    try:
        # NOTE(review): the seed is itself random, so results are only
        # reproducible within one process lifetime, not across restarts.
        seed = random.randint(1, 100000)
        print(f"随机种子: {seed}")
        torch.manual_seed(seed)

        # Build the synthesis model (project-local AudioModel).
        audioModel = AudioModel()
        print("语音合成模型初始化成功")

        # Prepare directories / caches and report the available voices.
        voices = init_config()
        print("可用的音色:")
        for voice in voices:
            print(f" - {voice['title']} (ID: {voice['name']}): {voice['file']}")
    except Exception as e:
        import traceback
        traceback.print_exc()
        # Chain the cause instead of discarding it (was: bare `raise RuntimeError(...)`).
        raise RuntimeError(f"模型初始化失败: {str(e)}") from e

# Configuration init: directories, temp cleanup, voice cache
def init_config():
    """Create required directories, purge stale temp WAVs, and cache voices.

    Returns:
        list[dict]: metadata for every cached voice (see get_available_voices).
    """
    # Create required directories.
    Config.VOICE_DIR.mkdir(parents=True, exist_ok=True)
    Config.TEMP_DIR.mkdir(parents=True, exist_ok=True)

    # Best-effort cleanup of temp WAVs older than one hour. stat() is inside
    # the try because the file can vanish between glob() and stat(); only
    # filesystem errors are swallowed (the old bare `except` hid real bugs).
    cutoff = datetime.now().timestamp() - 3600
    for file in Config.TEMP_DIR.glob("*.wav"):
        try:
            if file.stat().st_mtime < cutoff:
                file.unlink()
        except OSError:
            pass

    # Populate Config.VOICE_CACHE from the voice directory.
    cache_available_voices()

    return get_available_voices()

# List every cached voice as plain JSON-serializable metadata
def get_available_voices() -> List[Dict[str, str]]:
    """Return name/title/file/type metadata for every cached voice."""
    voices = []
    for voice_id, entry in Config.VOICE_CACHE.items():
        source = Path(entry["file"])
        voices.append({
            "name": voice_id,
            "title": entry["title"],
            "file": source.name,        # expose only the file name, not the path
            "type": source.suffix[1:],  # extension without the leading dot
        })
    return voices

# Voice cache: scan the voice directory and rebuild the in-memory cache
def cache_available_voices():
    """Reload Config.VOICE_CACHE from the audio files under Config.VOICE_DIR."""
    Config.VOICE_CACHE.clear()

    # Supported reference-audio formats; first match per stem wins.
    for pattern in ("*.mp3", "*.wav", "*.flac"):
        for audio_path in Config.VOICE_DIR.glob(pattern):
            stem = audio_path.stem
            if stem in Config.VOICE_CACHE:
                continue  # already cached under another extension
            voice = Voice(stem)
            if not voice.load_data(audio_path):
                continue  # skip voices that fail to load
            Config.VOICE_CACHE[stem] = {
                "audio": voice.audio_tensor,
                "title": voice.title,
                "text": voice.prompt_text,
                "file": str(audio_path),
            }

# Core streaming synthesis - supports pcm | opus | wav chunk encodings
def audio_stream_generator(text: str, voice_name: str, encoding: str = "pcm"):
    """Synthesize `text` with the given voice and yield encoded audio chunks.

    Validation of the voice and encoding now happens eagerly at call time.
    The old version validated inside the generator body, so errors only
    surfaced on the first iteration — after StreamingResponse had already
    sent 200 headers.

    Args:
        text: text to synthesize.
        voice_name: key into Config.VOICE_CACHE.
        encoding: "pcm" (raw float32), "opus", or "wav".

    Returns:
        A generator yielding `bytes` chunks in the requested encoding.

    Raises:
        ValueError: unknown voice name or unsupported encoding.
    """
    voice_data = Config.VOICE_CACHE.get(voice_name)
    if not voice_data:
        raise ValueError(f"未找到音色: {voice_name}")
    if encoding not in ("pcm", "opus", "wav"):
        raise ValueError(f"不支持的编码格式: {encoding}")

    def _stream():
        generator = audioModel.generate_stream(text, voice_data["text"], voice_data["audio"])
        for chunk in generator:
            # Post-process each chunk to the 24 kHz output format.
            processed = official_audio_processing(chunk['tts_speech'])
            audio_np = processed.squeeze().cpu().numpy().astype(np.float32)

            if encoding == "pcm":
                # Raw float32 PCM bytes.
                yield audio_np.tobytes()
            elif encoding == "opus":
                yield float32_pcm_to_opus(audio_np, opus_encoder)
            else:  # "wav" — already validated above
                yield float32_pcm_to_wav(audio_np)

    return _stream()

# ======================== FastAPI application ========================
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: initialize the model on startup, log on shutdown."""
    print("🚀 启动服务...")
    init_model()  # blocking model load; the server accepts traffic only after this returns
    print("✅ 服务已就绪，等待请求...")
    yield
    # Cleanup work on application shutdown (currently just a log line).
    print("🛑 关闭服务...")


app = FastAPI(
    title="语音合成API服务",
    description="提供高质量的实时和批量语音合成接口",
    version="1.0.0",
    lifespan=lifespan  # model/voice loading happens here at startup
)

# CORS: allow cross-origin requests
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # all origins; restrict to specific domains in production.
                          # NOTE(review): "*" combined with allow_credentials=True is
                          # rejected by browsers for credentialed requests — confirm intent.
    allow_credentials=True,
    allow_methods=["*"],  # all HTTP methods
    allow_headers=["*"],  # all request headers
)

# --------------------------- API endpoints ---------------------------
@app.get("/", include_in_schema=False)
async def root():
    """Health/status check at the root path (hidden from the OpenAPI schema)."""
    status_info = {
        "service": "语音合成API",
        "status": "运行中",
        "version": "1.0.0",
    }
    return status_info

@app.get("/voices", summary="获取可用音色列表")
def get_voices():
    """Return the default voice name plus metadata for all cached voices."""
    voice_list = get_available_voices()
    return {"default_voice": Config.DEFAULT_VOICE, "voices": voice_list}

@app.post("/stream", summary="流式语音合成")
async def stream_audio(
    request: Request, 
    voice: Optional[str] = Query(Config.DEFAULT_VOICE, description="指定音色名称（可选）"),
    encoding: Optional[str] = Query("pcm", description="音频编码格式: pcm、opus或wav")
):  
    """
    Streaming synthesis endpoint (real-time generation).

    - Body: UTF-8 text to synthesize (must be non-empty).
    - `voice`: unknown names silently fall back to the default voice.
    - `encoding`: pcm | opus | wav; anything else is rejected with 400.
    - Response: 24 kHz mono audio stream in the requested encoding.
    """
    # Media type per supported encoding; doubles as the validation set.
    media_types = {"pcm": "audio/pcm", "opus": "audio/ogg", "wav": "audio/wav"}
    try:
        text = (await request.body()).decode('utf-8')
        if not text.strip():
            raise HTTPException(400, "文本内容不能为空")

        # Reject unsupported encodings before streaming starts; the old code
        # only failed inside the generator, after the 200 headers were sent.
        if encoding not in media_types:
            raise HTTPException(400, f"不支持的编码格式: {encoding}")

        # Unknown voices fall back to the default.
        if voice not in Config.VOICE_CACHE:
            voice = Config.DEFAULT_VOICE

        media_type = media_types[encoding]
        return StreamingResponse(
            audio_stream_generator(text, voice, encoding=encoding),
            media_type=media_type,
            headers={
                "Content-Type": media_type,
                "Sample-Rate": str(Config.OUTPUT_SAMPLE_RATE),
                "Channels": "1",
                # float32 for raw PCM; otherwise the encoding name itself.
                "Sample-Format": "float32" if encoding == "pcm" else encoding,
                "X-Voice-Used": voice,
                "X-Encoding": encoding
            }
        )
    except HTTPException:
        # Don't re-wrap deliberate HTTP errors — the old broad `except`
        # caught them and mangled the detail into "400: <message>".
        raise
    except Exception as e:
        raise HTTPException(400, detail=str(e)) from e

FRAME_SIZE = 120  # samples per frame (5 ms at 24 kHz); NOTE(review): not used by any active code path in this file — confirm before removing

@app.websocket("/realtime")
async def realtime_audio(websocket: WebSocket):
    """
    Real-time synthesis stream (WebSocket).

    Protocol:
      1. Client sends a JSON config: {"voice": ..., "encoding": ..., "sample_rate": ...}.
      2. Server replies {"status": "ready", ...} or {"status": "error", ...}.
      3. Client sends text fragments; the server streams back binary audio
         chunks. Sending "CLOSE" (any case) ends the session.
    """
    await websocket.accept()
    
    try:
        # 1. Initial configuration handshake.
        config = await websocket.receive_json()
        voice = config.get("voice", Config.DEFAULT_VOICE)
        encoding = config.get("encoding", "pcm").lower()  # normalize case
        sample_rate = config.get("sample_rate", 24000)    # informational; synthesis output is 24 kHz

        # Validate the requested voice.
        if voice not in Config.VOICE_CACHE:
            await websocket.send_json({
                "status": "error",
                "message": f"Invalid voice '{voice}'. Available: {list(Config.VOICE_CACHE.keys())}"
            })
            return
        voice_data = Config.VOICE_CACHE[voice]

        # Validate the requested encoding.
        valid_encodings = ["pcm", "opus", "wav"]
        if encoding not in valid_encodings:
            await websocket.send_json({
                "status": "error",
                "message": f"Unsupported encoding '{encoding}'. Valid: {valid_encodings}"
            })
            return

        # Acknowledge — synthesis requests may now be sent.
        await websocket.send_json({
            "status": "ready",
            "voice": voice,
            "encoding": encoding,
            "sample_rate": sample_rate
        })

        # 2. Synthesis loop: one text message -> one streamed utterance.
        while True:
            message = await websocket.receive_text()
            if message.strip().upper() == "CLOSE":
                break
            if not message.strip():
                continue  # ignore empty messages

            try:
                generator = audioModel.generate_stream(
                    text=message,
                    prompt_text=voice_data["text"],
                    audio=voice_data["audio"]
                )

                for chunk in generator:
                    # Post-process each chunk to 24 kHz float32 mono.
                    processed = official_audio_processing(chunk['tts_speech'])
                    audio_np = processed.squeeze().cpu().numpy().astype(np.float32)

                    if encoding == "pcm":
                        await websocket.send_bytes(audio_np.tobytes())
                    elif encoding == "wav":
                        await websocket.send_bytes(float32_pcm_to_wav(audio_np))
                    else:  # "opus" — validated during the handshake
                        await websocket.send_bytes(float32_pcm_to_opus(audio_np, opus_encoder))

            except Exception as gen_error:
                # Report and keep the session alive for subsequent requests.
                await websocket.send_json({
                    "status": "error",
                    "message": f"Audio generation failed: {str(gen_error)[:100]}"
                })
                continue

    except (WebSocketDisconnect, ConnectionClosed):
        # Client closed the connection. FastAPI/Starlette raises
        # WebSocketDisconnect — the old handler only caught the `websockets`
        # package's ConnectionClosed, so normal disconnects fell through to
        # the generic error path below.
        pass
    except Exception as e:
        # Best-effort error report; the socket may already be gone.
        try:
            await websocket.send_json({
                "status": "error",
                "message": f"Server error: {str(e)[:200]}"
            })
        except Exception:
            pass
    finally:
        # Best-effort close; ignore "already closed" errors. `except Exception`
        # instead of the old bare `except` so KeyboardInterrupt/SystemExit pass through.
        try:
            await websocket.close()
        except Exception:
            pass

if __name__ == "__main__":
    # Development entry point; reload=True is for development only.
    uvicorn.run("voice_api:app", host="0.0.0.0", port=8000, reload=True)