import os
import subprocess
import whisper
import argparse

def extract_audio(video_path: str, audio_path: str, sr: int = 16000):
    """
    Extract the audio track of a video with ffmpeg as a mono WAV file
    at the requested sample rate.

    Parameters
    ----------
    video_path : str
        Path of the input video file.
    audio_path : str
        Path the extracted WAV file is written to (overwritten if present).
    sr : int
        Target sample rate in Hz (default 16000, what Whisper expects).

    Raises
    ------
    subprocess.CalledProcessError
        Re-raised after printing ffmpeg's stderr when the conversion fails.
    """
    command = [
        "ffmpeg",
        "-y",                      # overwrite the output file without asking
        "-i", video_path,
        "-ac", "1",                # mono
        "-ar", str(sr),            # sample rate
        "-vn",                     # drop the video stream
        "-codec:a", "pcm_s16le",   # 16-bit PCM audio
        audio_path,
    ]
    try:
        subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError as err:
        stderr_text = err.stderr.decode('utf-8', errors='ignore')
        print(f"ffmpeg error for {video_path}: {stderr_text}")
        raise

def transcribe_audio(model, audio_path: str, language: str = "zh"):
    """
    Run a loaded Whisper model over one audio file.

    Parameters
    ----------
    model
        A Whisper model as returned by ``whisper.load_model``.
    audio_path : str
        Path of the audio file to transcribe.
    language : str
        Language code passed to Whisper (default ``"zh"``).

    Returns
    -------
    dict
        The raw Whisper result (``"text"``, ``"segments"``, ...).
    """
    return model.transcribe(
        audio_path,
        language=language,
        task="transcribe",
        verbose=False,
    )

def save_transcription(result: dict, out_txt: str, out_srt: str):
    """
    把 whisper 的结果保存为全文 txt 和带时间戳的 srt 风格文本
    """
    segments = result.get("segments", [])
    full = result.get("text", "")
    with open(out_txt, "w", encoding="utf-8") as f:
        f.write(full + "\n")
    with open(out_srt, "w", encoding="utf-8") as f:
        for seg in segments:
            start = seg["start"]
            end = seg["end"]
            text = seg["text"].strip()
            f.write(f"{start:.2f} --> {end:.2f}: {text}\n")

def main():
    """
    Batch-transcribe every .mp4 under ``--video_dir`` with Whisper.

    For each video ``X.mp4`` this writes ``X_whisper.txt`` (full transcript)
    and ``X_whisper.srt`` (timestamped segments) next to the video. Videos
    whose both output files already exist are skipped, so the script is
    resumable.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--video_dir", type=str, default="videos", help="视频目录路径")
    parser.add_argument("--model_name", type=str, default="large", help="whisper 模型名称")
    parser.add_argument("--language", type=str, default="zh", help="识别语言代码，如 zh")
    args = parser.parse_args()

    video_dir = args.video_dir
    model = whisper.load_model(args.model_name)

    # Collect all .mp4 files (recursively).
    video_files = []
    for root, dirs, files in os.walk(video_dir):
        for fname in files:
            if fname.lower().endswith(".mp4"):
                video_files.append(os.path.join(root, fname))

    total = len(video_files)
    processed = 0

    for idx, video_path in enumerate(video_files, 1):
        base, _ = os.path.splitext(os.path.basename(video_path))
        out_txt = os.path.join(os.path.dirname(video_path), base + "_whisper.txt")
        out_srt = os.path.join(os.path.dirname(video_path), base + "_whisper.srt")
        audio_path = os.path.join(os.path.dirname(video_path), base + "_audio.wav")

        # Skip videos already processed (both txt and srt present).
        if os.path.exists(out_txt) and os.path.exists(out_srt):
            print(f"[{idx}/{total}] 已处理，跳过: {video_path}")
            continue

        print(f"[{idx}/{total}] 处理: {video_path}")
        try:
            extract_audio(video_path, audio_path)
            result = transcribe_audio(model, audio_path, language=args.language)
            save_transcription(result, out_txt, out_srt)
        except Exception as e:
            # Fix: previously a single corrupt/unreadable video aborted the
            # whole batch; report it and move on to the next file instead.
            print(f"处理失败 {video_path}: {e}")
            continue
        finally:
            # Fix: the extracted WAV is only an intermediate; previously it
            # was never deleted, so large temp files piled up next to the
            # videos. Remove it whether transcription succeeded or not.
            if os.path.exists(audio_path):
                os.remove(audio_path)
        print(f"已保存: {out_txt}, {out_srt}")
        processed += 1

    print(f"处理完成：{processed} / {total} 个视频文件")

if __name__ == "__main__":
    main()
