import os
import json
from pathlib import Path
import argparse
from tqdm import tqdm
from faster_whisper import WhisperModel

def transcribe_audio(audio_path, model, output_dir, language=None, input_dir=None):
    """
    Transcribe a single audio file with faster-whisper.

    Parameters:
    audio_path (str): Path to the audio file.
    model (WhisperModel): A loaded WhisperModel instance.
    output_dir (str): Root directory for the .txt/.json outputs.
    language (str, optional): Audio language; auto-detected when None.
    input_dir (str, optional): Root of the input tree, used to mirror the
        file's relative layout under output_dir. When None, falls back to
        the module-level ``args.input_dir`` for backward compatibility.

    Returns:
    str: Path of the transcript .txt file (also returned on skip/failure).
    """
    # BUG FIX: the original read the global ``args.input_dir`` directly,
    # which broke any use of this function outside the CLI entry point.
    if input_dir is None:
        input_dir = args.input_dir

    # Mirror the audio file's position relative to the input root.
    rel_path = os.path.relpath(audio_path, input_dir)
    rel_dir = os.path.dirname(rel_path)

    # Create the matching subdirectory (exist_ok avoids a check/create race).
    output_subdir = os.path.join(output_dir, rel_dir)
    if rel_dir:
        os.makedirs(output_subdir, exist_ok=True)

    # Output files share the audio file's base name.
    base_name = os.path.splitext(os.path.basename(audio_path))[0]
    txt_path = os.path.join(output_dir, rel_dir, f"{base_name}.txt")
    json_path = os.path.join(output_dir, rel_dir, f"{base_name}.json")

    # Skip work that is already done.
    if os.path.exists(txt_path) and os.path.exists(json_path):
        return txt_path

    try:
        # Transcribe with VAD filtering and beam search.
        segments, info = model.transcribe(
            audio_path,
            language=language,
            vad_filter=True,
            vad_parameters=dict(
                min_silence_duration_ms=500,  # tune these as needed
                speech_pad_ms=400
            ),
            word_timestamps=True,
            beam_size=5
        )
    except Exception as e:
        # Best-effort: report which file failed and keep the batch going.
        print(f"转写失败 {audio_path}: {e}")
        return txt_path

    # Collect all segments; accumulate text pieces and join once
    # (avoids quadratic string concatenation).
    segments_list = []
    text_parts = []
    for segment in segments:
        segments_list.append({
            "id": segment.id,
            "start": segment.start,
            "end": segment.end,
            "text": segment.text,
            "words": [
                {"word": w.word, "start": w.start, "end": w.end,
                 "probability": w.probability}
                for w in (segment.words or [])
            ],
        })
        text_parts.append(segment.text)
    full_text = " ".join(text_parts).strip()

    # Save the plain transcript.
    with open(txt_path, "w", encoding="utf-8") as f:
        f.write(full_text)

    # Save the detailed result (language info, segments, word timings).
    result = {
        "text": full_text,
        "language": info.language,
        "language_probability": info.language_probability,
        "segments": segments_list
    }

    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(result, f, ensure_ascii=False, indent=2)

    return txt_path

def process_audio_files(input_dir, output_dir, model_size="large-v3", device="cuda", compute_type="float16", language=None):
    """
    Transcribe every audio file found under a directory tree.

    Parameters:
    input_dir (str): Root directory containing audio files.
    output_dir (str): Root directory for transcripts (mirrors input layout).
    model_size (str): Whisper model size.
    device (str): Device to run on ("cuda" or "cpu").
    compute_type (str): Computation precision.
    language (str, optional): Audio language; auto-detected when None.
    """
    # Ensure the output root exists (exist_ok avoids a check/create race).
    os.makedirs(output_dir, exist_ok=True)

    # Load the Whisper model once and reuse it for every file.
    print(f"正在加载 {model_size} 模型...")
    model = WhisperModel(model_size, device=device, compute_type=compute_type)
    print("模型加载完成。")

    # Recursively collect audio files by extension (case-insensitive;
    # a set gives O(1) membership tests).
    audio_extensions = {'.mp3', '.wav', '.flac', '.ogg', '.m4a'}
    audio_files = []

    for root, _, files in os.walk(input_dir):
        for file in files:
            if os.path.splitext(file)[1].lower() in audio_extensions:
                audio_files.append(os.path.join(root, file))

    print(f"找到 {len(audio_files)} 个音频文件。")

    # Process each audio file.
    successful = 0
    skipped = 0

    for audio_file in tqdm(audio_files, desc="转写音频", unit="文件"):
        # Skip files whose transcript already exists.
        rel_dir = os.path.dirname(os.path.relpath(audio_file, input_dir))
        filename = os.path.basename(audio_file)
        base_name = os.path.splitext(filename)[0]
        txt_path = os.path.join(output_dir, rel_dir, f"{base_name}.txt")

        if os.path.exists(txt_path):
            # BUG FIX: the message previously printed the literal placeholder
            # "(unknown)" instead of interpolating the file name.
            print(f"跳过 {filename} (已存在转写文件)")
            skipped += 1
            continue

        # Transcribe the audio file.
        result_path = transcribe_audio(audio_file, model, output_dir, language)
        print(f"已转写 {filename} -> {result_path}")
        successful += 1

    # Print a summary of the run.
    print(f"\n转写摘要:")
    print(f"  总音频文件: {len(audio_files)}")
    print(f"  成功转写: {successful}")
    print(f"  已跳过 (已存在): {skipped}")
if __name__ == "__main__":
    # CLI entry point: declare options, parse, then run the batch job.
    cli = argparse.ArgumentParser(description="使用 faster-whisper 批量转写音频")
    cli.add_argument("--input_dir", type=str, required=True, help="输入音频文件夹路径")
    cli.add_argument("--output_dir", type=str, required=True, help="输出文本文件夹路径")
    cli.add_argument("--model_size", type=str, default="large-v3", help="Whisper 模型大小")
    cli.add_argument("--device", type=str, default="cuda", choices=["cuda", "cpu"], help="运行设备")
    cli.add_argument(
        "--compute_type",
        type=str,
        default="float16",
        choices=["float16", "int8_float16", "int8"],
        help="计算精度",
    )
    cli.add_argument("--language", type=str, default=None, help="音频语言 (如果不指定则自动检测)")

    # ``args`` must stay a module-level name: transcribe_audio reads it.
    args = cli.parse_args()

    process_audio_files(
        args.input_dir,
        args.output_dir,
        model_size=args.model_size,
        device=args.device,
        compute_type=args.compute_type,
        language=args.language,
    )