"""
Fish Speech 推理脚本
使用方法：
1. 确保已下载模型到 checkpoints/openaudio-s1-mini 目录
2. 准备参考音频和对应的文本
3. 运行脚本生成语音

"""

import torch
import numpy as np
import soundfile as sf
from pathlib import Path
from loguru import logger
from fish_speech.inference_engine import TTSInferenceEngine
from fish_speech.models.dac.inference import load_model as load_decoder_model
from fish_speech.models.text2semantic.inference import launch_thread_safe_queue
from fish_speech.utils.schema import ServeTTSRequest, ServeReferenceAudio
from fish_speech.utils.file import audio_to_bytes

# Configuration parameters
CONFIG = {
    # Model parameters
    "checkpoint_path": "checkpoints/openaudio-s1-mini",
    "device": "cuda" if torch.cuda.is_available() else "cpu",
    "precision": torch.bfloat16,
    "compile": False,  # Compilation disabled for now; needs triton — Windows builds: https://hf-mirror.com/madbuda/triton-windows-builds

    # Generation parameters
    "temperature": 0.7,      # Sampling randomness (recommended 0.6-0.8)
    "top_p": 0.7,           # Nucleus sampling threshold (recommended 0.6-0.8)
    "repetition_penalty": 1.2,  # Repetition penalty (recommended 1.1-1.3)
    "max_new_tokens": 1024,  # Max number of generated tokens
    "chunk_length": 200,     # Text chunk length for streaming generation

    # Audio parameters
    "amplitude": 32768,  # float->int16 PCM scale factor; NOTE(review): int16 max is 32767, a sample at +1.0 overflows unless clipped
}

def init_engine():
    """Build and return a ready-to-use TTS inference engine.

    Loads the text2semantic (LLaMA) model behind a thread-safe request
    queue and the DAC decoder model, then wires both into a
    ``TTSInferenceEngine``.

    Returns:
        TTSInferenceEngine: the assembled inference engine.
    """
    logger.info("正在加载模型...")

    # Hoist shared config values so both loaders stay in sync.
    device = CONFIG["device"]
    precision = CONFIG["precision"]
    use_compile = CONFIG["compile"]
    checkpoint_dir = Path(CONFIG["checkpoint_path"])

    # Text-to-semantic model, served through a thread-safe queue.
    text2semantic_queue = launch_thread_safe_queue(
        checkpoint_path=CONFIG["checkpoint_path"],
        device=device,
        precision=precision,
        compile=use_compile,
    )

    # Neural codec that converts semantic tokens back into a waveform.
    codec = load_decoder_model(
        config_name="modded_dac_vq",
        checkpoint_path=str(checkpoint_dir / "codec.pth"),
        device=device,
    )

    return TTSInferenceEngine(
        llama_queue=text2semantic_queue,
        decoder_model=codec,
        precision=precision,
        compile=use_compile,
    )

def generate_speech(engine, text, ref_audio_path, ref_text, output_path="output_0.wav"):
    """Synthesize speech for ``text`` using a reference voice clip.

    Args:
        engine: TTSInferenceEngine instance (see ``init_engine``).
        text: Text to synthesize.
        ref_audio_path: Path to the reference audio file.
        ref_text: Transcript of the reference audio.
        output_path: Destination path for the generated WAV file.

    Returns:
        bool: True if audio was generated and written, False otherwise.
    """
    # 1. Build the request — the reference audio/transcript pair drives voice cloning.
    ref_audio = audio_to_bytes(ref_audio_path)
    if ref_audio is None:
        logger.error(f"无法读取参考音频: {ref_audio_path}")
        return False

    request = ServeTTSRequest(
        text=text,
        references=[ServeReferenceAudio(audio=ref_audio, text=ref_text)],
        reference_id=None,
        max_new_tokens=CONFIG["max_new_tokens"],
        chunk_length=CONFIG["chunk_length"],
        top_p=CONFIG["top_p"],
        repetition_penalty=CONFIG["repetition_penalty"],
        temperature=CONFIG["temperature"],
        format="wav",
    )

    # 2. Run inference; the engine streams results segment by segment.
    logger.info("开始生成...")
    segments = []
    sample_rate = None

    for result in engine.inference(request):
        if result.code == "error":
            logger.error(f"生成错误: {result.error}")
            return False
        elif result.code in ["segment", "final"]:
            # Audio payloads arrive as a (sample_rate, ndarray) tuple.
            if isinstance(result.audio, tuple):
                current_rate, audio = result.audio
                if sample_rate is None:
                    sample_rate = current_rate
                elif sample_rate != current_rate:
                    logger.warning(f"采样率不一致: {sample_rate} != {current_rate}")
                segments.append(audio)

    # 3. Concatenate the segments and save as 16-bit PCM WAV.
    if segments and sample_rate is not None:
        audio = np.concatenate(segments, axis=0)
        # Clip before the int16 cast: a float sample at exactly +1.0 scaled by
        # 32768 exceeds the int16 max (32767) and would wrap around to -32768,
        # producing an audible click at peaks.
        audio = np.clip(audio * CONFIG["amplitude"], -32768, 32767).astype(np.int16)
        sf.write(output_path, audio, sample_rate)
        logger.info(f"生成完成，已保存到 {output_path}")
        return True

    logger.error("生成失败：没有生成音频数据")
    return False

if __name__ == "__main__":
    # 初始化引擎
    engine = init_engine()
    
    # 生成语音
    generate_speech(
        engine=engine,
        text="Hello World！",
        ref_audio_path="assets/test_taishang.wav",
        ref_text="对，这就是我，万人敬仰的太乙真人，虽然有点婴儿肥，但也掩不住我逼人的帅气。"
    )