# tts_api.py
import os
import argparse
import numpy as np
import torch
import torchaudio
import librosa
import sys
import io
import traceback
from contextlib import asynccontextmanager
from fastapi import FastAPI, UploadFile, Form, HTTPException
from fastapi.responses import Response
from typing import Optional, Dict, Any

# System path setup (make the vendored Matcha-TTS package importable)
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(f"{ROOT_DIR}/third_party/Matcha-TTS")

# Model-related imports
from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
from cosyvoice.utils.file_utils import load_wav
from cosyvoice.utils.common import set_all_random_seed

# Constants
# Maps each canonical mode key to [display label, inference-method suffix].
# validate_mode() accepts either the key or an alias and returns the key;
# the suffix selects the model method, e.g. 'sft' -> tts_model.inference_sft.
INFERENCE_MODES = {
    'pre-trained': ['预训练音色', 'sft'],
    'zero-shot': ['3s极速复刻', 'zero_shot'],
    'cross-lingual': ['跨语种复刻', 'cross_lingual'],
    'instruct': ['自然语言控制', 'instruct']
}
MAX_VOLUME = 0.8   # target peak amplitude after normalization in post_process()
PROMPT_SR = 16000  # sample rate (Hz) the model expects for prompt audio

# Application lifecycle management
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Load the TTS model at startup and release it at shutdown.

    Tries CosyVoice first and falls back to CosyVoice2 on failure. The
    loaded model and its version tag ('v1'/'v2') are published through
    module-level globals so request handlers can reach them.

    Raises:
        RuntimeError: if neither model variant can be loaded.
    """
    global tts_model, model_type
    try:
        try:
            tts_model = CosyVoice(args.model_dir)
            model_type = 'v1'
        except Exception as e:
            # v1 loader rejected the checkpoint — retry with the v2 loader.
            print(f"尝试加载CosyVoice失败，错误：{str(e)}，尝试加载CosyVoice2")
            tts_model = CosyVoice2(args.model_dir)
            model_type = 'v2'

        print(f"模型加载成功，类型：{model_type}")
        print(f"支持音色：{tts_model.list_available_spks()}")
    except Exception as e:
        # Chain the cause so the original loading traceback is preserved.
        raise RuntimeError(f"模型初始化失败: {str(e)}") from e
    yield
    # Shutdown: drop the model reference so its memory can be reclaimed.
    del tts_model

# FastAPI application; model load/teardown is handled by the lifespan hook.
app = FastAPI(lifespan=lifespan)

def validate_mode(mode: str) -> str:
    """Resolve a user-supplied mode name or alias to its canonical key.

    Args:
        mode: canonical key or any alias listed in INFERENCE_MODES.

    Returns:
        The canonical INFERENCE_MODES key.

    Raises:
        ValueError: if *mode* matches no known key or alias.
    """
    matched = next(
        (key for key, aliases in INFERENCE_MODES.items() if mode in aliases),
        None,
    )
    if matched is None:
        raise ValueError(f"无效模式，可选：{list(INFERENCE_MODES.values())}")
    return matched

def post_process(waveform: torch.Tensor, sample_rate: int) -> torch.Tensor:
    """Trim silence, peak-normalize, and append trailing silence.

    Args:
        waveform: (1, T) CPU tensor of mono audio samples.
        sample_rate: sample rate in Hz, used to size the trailing pad.

    Returns:
        (1, T') tensor of the processed audio.
    """
    speech_np = waveform.squeeze(0).numpy()

    # Strip leading/trailing silence quieter than -40 dB.
    trimmed, _ = librosa.effects.trim(
        speech_np,
        top_db=40,
        frame_length=2048,
        hop_length=512
    )

    # Peak-normalize to MAX_VOLUME; skip near-silent signals so we don't
    # divide by ~0 and blow the waveform up to full scale.
    max_amp = np.max(np.abs(trimmed))
    if max_amp > 1e-6:
        trimmed = trimmed / max_amp * MAX_VOLUME

    # Append 0.2 s of silence. Match the signal's dtype explicitly —
    # np.zeros defaults to float64, which would silently upcast float32
    # audio during the concatenation.
    trimmed = np.concatenate([
        trimmed,
        np.zeros(int(0.2 * sample_rate), dtype=trimmed.dtype)
    ])

    return torch.from_numpy(trimmed).unsqueeze(0)

def generate_error_audio(sample_rate: int) -> bytes:
    """Return one second of silence encoded as 16-bit PCM WAV bytes."""
    silence = torch.zeros(1, sample_rate)  # (1 channel, sample_rate samples) == 1 s
    out = io.BytesIO()
    torchaudio.save(
        out,
        silence,
        sample_rate,
        format="wav",
        encoding="PCM_S",
        bits_per_sample=16,
    )
    return out.getvalue()

@app.post("/generate")
async def generate_audio(
        text: str = Form(...),
        mode: str = Form(...),
        speaker: Optional[str] = Form(None),
        prompt_text: Optional[str] = Form(None),
        prompt_audio: Optional[UploadFile] = None,
        instruct_text: Optional[str] = Form(None),
        seed: int = Form(0),
        speed: float = Form(1.0)
) -> Response:
    try:
        # === 参数验证 ===
        try:
            norm_mode = validate_mode(mode)
        except ValueError as e:
            raise HTTPException(400, str(e))

        # === 处理上传音频 ===
        prompt_path = None
        if prompt_audio:
            try:
                prompt_path = f"temp_{prompt_audio.filename}"
                with open(prompt_path, "wb") as f:
                    f.write(await prompt_audio.read())
            except Exception as e:
                raise HTTPException(500, f"音频上传失败: {str(e)}")

        # === 生成参数构造 ===
        base_params = {
            "tts_text": text,
            "stream": True,
            "speed": speed,
            "text_frontend": True
        }

        # 模式特定参数
        mode_params = {}
        if norm_mode == 'pre-trained':
            if not speaker:
                speaker = tts_model.list_available_spks()[0]
            mode_params = {"spk_id": speaker}

        elif norm_mode == 'zero-shot':
            if not prompt_text or not prompt_path:
                raise HTTPException(400, "此模式需要prompt_text和prompt_audio")
            prompt_wav = load_wav(prompt_path, PROMPT_SR)
            mode_params = {
                "prompt_text": prompt_text,
                "prompt_speech_16k": prompt_wav
            }

        elif norm_mode == 'cross-lingual':
            if not prompt_path:
                raise HTTPException(400, "此模式需要prompt_audio")
            prompt_wav = load_wav(prompt_path, PROMPT_SR)
            mode_params = {"prompt_speech_16k": prompt_wav}

        elif norm_mode == 'instruct':
            if model_type == 'v2':
                if not prompt_path or not instruct_text:
                    raise HTTPException(400, "此模式需要instruct_text和prompt_audio")
                prompt_wav = load_wav(prompt_path, PROMPT_SR)
                mode_params = {
                    "instruct_text": instruct_text,
                    "prompt_speech_16k": prompt_wav
                }
            else:
                if not speaker or not instruct_text:
                    raise HTTPException(400, "此模式需要speaker和instruct_text")
                mode_params = {
                    "spk_id": speaker,
                    "instruct_text": instruct_text
                }

        # === 执行推理 ===
        try:
            audio_chunks = []
            method_name = INFERENCE_MODES[norm_mode][1]
            method = getattr(tts_model, f"inference_{method_name}")

            for chunk in method(**{**base_params, **mode_params}):
                audio_chunks.append(chunk['tts_speech'].numpy().flatten())

        except Exception as e:
            print(f"生成失败: {traceback.format_exc()}")
            return Response(
                content=generate_error_audio(tts_model.sample_rate),
                media_type="audio/wav",
                headers={"Content-Disposition": "attachment; filename=error.wav"}
            )

        # === 后处理 ===
        full_audio = np.concatenate(audio_chunks)
        waveform = post_process(torch.from_numpy(full_audio).unsqueeze(0), tts_model.sample_rate)

        # === 生成WAV ===
        buffer = io.BytesIO()
        torchaudio.save(
            buffer, waveform, tts_model.sample_rate,
            format="wav", encoding="PCM_S", bits_per_sample=16
        )

        return Response(
            content=buffer.getvalue(),
            media_type="audio/wav",
            headers={
                "Content-Disposition": "attachment; filename=output.wav",
                "Sample-Rate": str(tts_model.sample_rate)
            }
        )

    finally:
        if prompt_path and os.path.exists(prompt_path):
            os.remove(prompt_path)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=8000)
    parser.add_argument('--model_dir', type=str, required=True)
    args = parser.parse_args()

    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=args.port)