import os
import numpy as np
import librosa
import soundfile as sf
import random

def merge_conversation_audio_timeline(first_paths, second_paths, output_name,
                                      target_sr=44100, pause_sec=0.6,
                                      random_pause=False, normalize=True, debug=True):
    """
    Merge two groups of speech clips (e.g. two speakers in a dialogue) into
    two separate, time-aligned mono tracks that alternate naturally.

    Clips are laid out A, B, A, B, ... on one shared timeline, with a pause
    inserted after each clip so the conversation sounds natural. Each output
    track contains only its own speaker's audio (silence elsewhere), so the
    two files can be mixed or panned independently later.

    Args:
        first_paths (list[str]): paths to the first speaker's audio clips.
        second_paths (list[str]): paths to the second speaker's audio clips.
        output_name (str): output base name; writes ``output_name_1.wav``
            and ``output_name_2.wav``.
        target_sr (int): target sample rate, default 44100.
        pause_sec (float): fixed pause inserted after each clip, in seconds.
        random_pause (bool): if True, each pause is drawn uniformly from
            [0.4, 1.0] seconds instead of using ``pause_sec``.
        normalize (bool): if True, peak-normalize both tracks by a shared
            factor (divisor is at least 1.0, so tracks are never amplified).
        debug (bool): print debug information.

    Returns:
        tuple[str, str]: (first track path, second track path)

    Raises:
        ValueError: if no existing audio file was found in either list.
    """

    def load_to_mono(path):
        # Load at native rate, then resample only when it differs from target.
        y, sr = librosa.load(path, sr=None, mono=True)
        if sr != target_sr:
            y = librosa.resample(y, orig_sr=sr, target_sr=target_sr)
        return y.astype(np.float32)

    def next_pause_samples():
        # One pause per clip; randomized when requested.
        pause = random.uniform(0.4, 1.0) if random_pause else pause_sec
        return int(pause * target_sr)

    # 1. Load audio; paths that do not exist are silently skipped.
    first_segments = [load_to_mono(p) for p in first_paths if os.path.exists(p)]
    second_segments = [load_to_mono(p) for p in second_paths if os.path.exists(p)]

    if not first_segments and not second_segments:
        raise ValueError("没有找到任何音频段，请检查路径！")

    # 2. Length of each segment in samples.
    lenA = [len(s) for s in first_segments]
    lenB = [len(s) for s in second_segments]

    if debug:
        print("lenA (samples):", lenA)
        print("lenB (samples):", lenB)

    # 3. Compute the shared timeline: start offsets for each clip,
    #    alternating speaker A then speaker B each round.
    startA, startB = [], []
    current = 0
    max_rounds = max(len(lenA), len(lenB))

    for i in range(max_rounds):
        # Speaker A's clip for this round (if any remain).
        if i < len(lenA):
            startA.append(current)
            current += lenA[i] + next_pause_samples()
        # Speaker B's clip for this round (if any remain).
        if i < len(lenB):
            startB.append(current)
            current += lenB[i] + next_pause_samples()

    total_samples = current
    if debug:
        print("startA:", startA)
        print("startB:", startB)
        print("total_samples:", total_samples, "≈", total_samples / target_sr, "s")

    # 4. Allocate one silent timeline per speaker.
    timelineA = np.zeros(total_samples, dtype=np.float32)
    timelineB = np.zeros(total_samples, dtype=np.float32)

    # 5. Place each clip at its computed start offset.
    for seg, st in zip(first_segments, startA):
        timelineA[st:st + len(seg)] = seg

    for seg, st in zip(second_segments, startB):
        timelineB[st:st + len(seg)] = seg

    # 6. Shared-peak normalization so the relative loudness of the two
    #    speakers is preserved; the 1.0 floor also avoids division by zero.
    if normalize:
        peak = max(np.max(np.abs(timelineA)), np.max(np.abs(timelineB)), 1.0)
        timelineA /= peak
        timelineB /= peak

    # 7. Output paths. Only create the directory when output_name actually
    #    has one — os.makedirs("") raises FileNotFoundError otherwise.
    pathA = f"{output_name}_1.wav"
    pathB = f"{output_name}_2.wav"
    out_dir = os.path.dirname(pathA)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # 8. Write both tracks.
    sf.write(pathA, timelineA, target_sr)
    sf.write(pathB, timelineB, target_sr)

    if debug:
        print(f"Saved: {pathA} ({len(timelineA)/target_sr:.2f}s)")
        print(f"Saved: {pathB} ({len(timelineB)/target_sr:.2f}s)")

    return pathA, pathB


# --------------------------
# Example usage
# --------------------------
if __name__ == "__main__":
    # Alternating two-speaker demo: three clips for speaker 1, two for speaker 2.
    male_clips = [
        "data/audios/男女版赛事预测_男_1.mp3",
        "data/audios/男女版赛事预测_男_2.mp3",
        "data/audios/男女版赛事预测_男_3.mp3",
    ]
    female_clips = [
        "data/audios/男女版赛事预测_女_1.mp3",
        "data/audios/男女版赛事预测_女_2.mp3",
    ]

    merge_conversation_audio_timeline(
        male_clips,
        female_clips,
        "audio/data/audios/combine",
        pause_sec=0.6,
        random_pause=True,
    )
