import subprocess
from pydub import AudioSegment


def run_inference(
        transpose_value,
        input_path,
        output_path,
        model_path,
        index_file_path,
        inference_device,
        method
):
    """
    Run the voice-conversion model via the external ``infer_cli.py`` script.

    Parameters:
    - transpose_value: pitch transpose passed as the first CLI argument (e.g. "0").
    - input_path: path to the input audio file.
    - output_path: path where the converted audio is written.
    - model_path: path to the model weights file.
    - index_file_path: path to the feature index file.
    - inference_device: device for inference, e.g. "cuda:0" or "cpu".
    - method: pitch extraction algorithm (e.g. "pm", "harvest", "crepe", "rmvpe").

    Returns:
    - None. Success/failure is reported via print(); a failing subprocess
      is caught and does not raise.
    """
    # Pass arguments as a list with shell=False: paths containing quotes,
    # spaces or shell metacharacters are handled safely, and there is no
    # shell-injection risk from caller-supplied values.
    command = [
        "python", "infer_cli.py",
        str(transpose_value),
        input_path,
        output_path,
        model_path,
        index_file_path,
        inference_device,
        method,
    ]

    try:
        subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("命令执行成功。")
    except subprocess.CalledProcessError as e:
        # check=True raises on non-zero exit; stderr is bytes because it was PIPEd.
        print(f"命令执行失败: {e.stderr.decode().strip()}")


# Example usage (device is a torch-style device string; method is a pitch
# extraction algorithm — "pm", "harvest", "crepe" or "rmvpe"):
# run_inference("0", "/path/to/input", "/path/to/output", "/path/to/model", "/path/to/index_file", "cuda:0", "harvest")

def buildCombine(file1, file2, output):
    """
    Mix two audio files together and export the result.

    The second track is overlaid on (played simultaneously with) the first,
    not appended after it. audio2 is resampled to audio1's frame rate so the
    overlay lines up.

    :param file1: path to the base track (e.g. the instrumental).
    :param file2: path to the track to overlay (e.g. the AI vocal).
    :param output: destination path; the export format is taken from its
                   file extension.
    :return: None
    """
    # The container format is inferred from the file extension
    # (everything after the last dot).
    audio1 = AudioSegment.from_file(file1, format=file1.split('.')[-1])
    audio2 = AudioSegment.from_file(file2, format=file2.split('.')[-1])

    # Match audio2's sample rate to audio1's so the two tracks can be mixed.
    # (The original also called set_frame_rate on audio1 with its own rate,
    # which was a no-op and has been removed.)
    audio2 = audio2.set_frame_rate(audio1.frame_rate)

    # Overlay mixes the two tracks; both play from time zero.
    combined_audio = audio1.overlay(audio2)

    combined_audio.export(output, format=output.split('.')[-1])



def run_infer_model(model_path,
                    index_file_path,
                    instru_file,
                    input_file,
                    ai_vocal_file,
                    output,
                    method='harvest'):
    """
    Convert a vocal track with the given model, then mix it with the
    instrumental.

    Pitch extraction choices (per the RVC WebUI): "pm" is fast for sung
    input, "harvest" handles low pitch well but is very slow, "crepe"
    sounds good but is GPU-heavy, "rmvpe" is best with light GPU use.

    :param model_path: path to the model weights (.pth).
    :param index_file_path: path to the feature index (.index).
    :param instru_file: instrumental/accompaniment audio file.
    :param input_file: source vocal audio file to convert.
    :param ai_vocal_file: where the converted vocal is written.
    :param output: destination path for the final mix.
    :param method: pitch extraction algorithm, defaults to 'harvest'.
    :return: None
    """
    converted_vocal = ai_vocal_file

    # Step 1: run the model over the input vocal on the first CUDA device.
    run_inference(
        transpose_value="0",
        input_path=input_file,
        output_path=converted_vocal,
        model_path=model_path,
        index_file_path=index_file_path,
        inference_device="cuda:0",
        method=method
    )

    # Step 2: overlay the converted vocal onto the instrumental.
    print('run buildCombine', instru_file, converted_vocal, output)
    buildCombine(instru_file, converted_vocal, output)

def run_infer_combine(instru_file, input_file, output):
    """
    Run the bundled sample model ("girl") over a vocal track and mix the
    result with the instrumental.

    Model and index paths are hard-coded to the sample checkpoint on this
    machine; the converted vocal goes to a temporary file in the current
    directory.

    :param instru_file: instrumental/accompaniment audio file.
    :param input_file: source vocal audio file to convert.
    :param output: destination path for the final mix.
    :return: None
    """
    # Hard-coded sample checkpoint and its feature index.
    model_path = '/root/autodl-tmp/Retrieval-based-Voice-Conversion-WebUI/weights/girl_e100_s400.pth'
    index_file_path = '/root/autodl-tmp/Retrieval-based-Voice-Conversion-WebUI/logs/added_IVF345_Flat_nprobe_1_girl_v2.index'

    print('run infer')
    converted_vocal = 'tmp.wav'

    run_inference(
        transpose_value="0",
        input_path=input_file,
        output_path=converted_vocal,
        model_path=model_path,
        index_file_path=index_file_path,
        inference_device="cuda:0",
        method="harvest"
    )

    print('run buildCombine', instru_file, converted_vocal, output)
    buildCombine(instru_file, converted_vocal, output)

# run_sample()