import csv
import os

from faster_whisper import WhisperModel
from tqdm import tqdm


def transcribe_audio(audio_path, model, txt_path, language=None):
    """
    Transcribe a single audio file with faster-whisper and write the result
    as a one-row CSV (header: language, language_probability, transcript).

    Parameters:
    audio_path (str): path to the audio file
    model (WhisperModel): a loaded WhisperModel instance
    txt_path (str): target output path; a '.txt' suffix is swapped for '.csv'
    language (str, optional): audio language; auto-detected when None

    Returns:
    str: the CSV path on success, or ``txt_path`` unchanged when
         transcription raised (nothing is written in that case).
    """
    try:
        # VAD filtering skips long silences before decoding.
        segments, info = model.transcribe(
            audio_path,
            language=language,
            vad_filter=True,
            vad_parameters=dict(
                min_silence_duration_ms=500,  # tune these for the material
                speech_pad_ms=400,
            ),
            word_timestamps=True,
            beam_size=5,
        )
    except Exception as e:
        # Best-effort: report the failure and let the caller move on to
        # the next file instead of aborting the whole batch.
        print(e)
        return txt_path

    # `segments` is a lazy generator — iterating it performs the actual
    # transcription work. (The original also built a per-word dict list
    # that was never used; that dead work has been removed.)
    full_text = " ".join(segment.text for segment in segments).strip()

    # Ensure the output directory exists. Guard against a bare filename,
    # where dirname() returns "" and os.makedirs("") would raise.
    folder_path = os.path.dirname(txt_path)
    if folder_path:
        os.makedirs(folder_path, exist_ok=True)

    # Write one CSV row with the detected language info and the transcript.
    # csv.writer handles quoting/escaping; the original hand-built quoting
    # produced malformed CSV whenever the transcript contained a '"'.
    csv_path = txt_path.replace('.txt', '.csv')
    with open(csv_path, "w", encoding="utf-8", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["language", "language_probability", "transcript"])
        writer.writerow([info.language, info.language_probability, full_text])

    return csv_path


def process_audio_files(
    input_dir,
    output_dir,
    model_size="large-v3",
    device="cuda",
    compute_type="float16",
    language=None,
):
    """
    Transcribe every audio file found under ``input_dir`` (recursively),
    mirroring the directory layout into ``output_dir`` as CSV files.

    Parameters:
    input_dir (str): folder containing the audio files
    output_dir (str): folder for the transcription output
    model_size (str): Whisper model size
    device (str): execution device ("cuda" or "cpu")
    compute_type (str): computation precision
    language (str, optional): audio language; auto-detected when None
    """
    # Create the output directory if it does not exist (race-free idiom).
    os.makedirs(output_dir, exist_ok=True)

    # Load the Whisper model once and reuse it for every file.
    print(f"正在加载 {model_size} 模型...")
    model = WhisperModel(model_size, device=device, compute_type=compute_type)
    print("模型加载完成。")

    # Collect all audio files, matched by extension (case-insensitive).
    audio_extensions = {".mp3", ".wav", ".flac", ".ogg", ".m4a"}
    audio_files = []
    for root, _, files in os.walk(input_dir):
        for file in files:
            if os.path.splitext(file)[1].lower() in audio_extensions:
                audio_files.append(os.path.join(root, file))

    print(f"找到 {len(audio_files)} 个音频文件。")

    successful = 0
    skipped = 0

    for audio_file in tqdm(audio_files, desc="转写音频", unit="文件"):
        # Build the mirrored output path for this audio file.
        rel_path = os.path.relpath(audio_file, input_dir)
        rel_dir = os.path.dirname(rel_path)
        filename = os.path.basename(audio_file)
        base_name = os.path.splitext(filename)[0]
        csv_path = os.path.join(output_dir, rel_dir, f"{base_name}.csv")

        # Skip files that already have a transcription.
        if os.path.exists(csv_path):
            # BUG FIX: the original printed the literal text "(unknown)"
            # instead of the file being skipped.
            print(f"跳过 {filename} (已存在转写文件)")
            skipped += 1
            continue

        result_path = transcribe_audio(audio_file, model, csv_path, language)
        print(f"已转写 {os.path.basename(audio_file)} -> {result_path}")
        # NOTE(review): transcribe_audio returns its input path on failure,
        # which is indistinguishable from success here, so failed files are
        # counted as successful — consider having it return None on failure.
        successful += 1

    # Print the run summary.
    print(f"\n转写摘要:")
    print(f"  总音频文件: {len(audio_files)}")
    print(f"  成功转写: {successful}")
    print(f"  已跳过 (已存在): {skipped}")