import os
import subprocess
import sys
import argparse
import time  # 添加time模块用于计时

import torch


def check_ffmpeg():
    """Return True if the ffmpeg executable is available on PATH."""
    try:
        # Probing with "-version" exits 0 when ffmpeg is installed.
        subprocess.run(
            ["ffmpeg", "-version"],
            capture_output=True,
            text=True,
            check=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False
    return True

def extract_audio_with_ffmpeg(video_path, audio_path):
    """Extract the audio track from a video file using ffmpeg.

    Args:
        video_path (str): path to the input video file.
        audio_path (str): path for the extracted audio output (overwritten).

    Returns:
        bool: True on success, False on any failure.
    """
    # -vn drops the video stream; mp3 @ 192k / 44.1 kHz; -y overwrites output.
    command = [
        "ffmpeg",
        "-i", video_path,
        "-vn",
        "-acodec", "mp3",
        "-ab", "192k",
        "-ar", "44100",
        "-y",
        audio_path,
    ]
    try:
        subprocess.run(command, capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError as e:
        print(f"FFmpeg错误: {e.stderr}")
        return False
    except Exception as e:
        print(f"提取音频时出错: {e}")
        return False
    print(f"音频已提取至: {audio_path}")
    return True

def install_and_import_whisper():
    """Import whisper, installing the openai-whisper package if missing.

    Returns:
        The imported whisper module, or None if installation failed.
    """
    try:
        import whisper
    except ImportError:
        print("未找到whisper库，正在安装...")
        try:
            # Install into the interpreter currently running this script.
            subprocess.check_call(
                [sys.executable, "-m", "pip", "install", "openai-whisper"])
            import whisper
        except Exception as e:
            print(f"安装whisper失败: {e}")
            return None
    return whisper

def transcribe_audio_with_whisper(audio_path, model_size="base", language=None):
    """Transcribe an audio file to text with OpenAI Whisper.

    Args:
        audio_path (str): path to the audio file to transcribe.
        model_size (str): Whisper model size (tiny, base, small, medium, large).
        language (str): audio language code; auto-detected when None.

    Returns:
        dict: Whisper transcription result (includes 'text' and 'segments').

    Raises:
        Exception: if whisper cannot be imported/installed, or if
            model loading / transcription fails (re-raised after logging).
    """
    try:
        whisper = install_and_import_whisper()
        if whisper is None:
            raise Exception("无法导入或安装whisper库")

        print(f"正在加载Whisper {model_size} 模型...")
        # Pass the device to load_model directly instead of loading and then
        # calling .to("cuda"): whisper.load_model accepts a `device` argument
        # and handles placement (and fp16 defaults) consistently.
        if torch.cuda.is_available():
            model = whisper.load_model(model_size, device="cuda")
            print("GPU 可用，使用指定模型")
        else:
            model = whisper.load_model(model_size, device="cpu")
            print("GPU 不可用，使用指定模型")

        print("正在进行音频转录...")
        transcribe_start_time = time.time()
        # language=None is transcribe()'s default (auto-detect), so a single
        # call covers both the explicit-language and auto-detect cases.
        result = model.transcribe(audio_path, language=language)
        transcribe_duration = time.time() - transcribe_start_time
        print(f"转录完成! 耗时: {transcribe_duration:.2f} 秒")
        return result
    except Exception as e:
        print(f"转录过程中出错: {e}")
        raise

def process_video_to_text(video_path, model_size="base", language=None, output_path=None):
    """Convert a video file's audio track into text.

    Pipeline: extract audio with ffmpeg -> transcribe with Whisper ->
    save to a file or print to the console. The temporary audio file is
    always removed afterwards.

    Args:
        video_path (str): path to the input video file.
        model_size (str): Whisper model size.
        language (str): audio language code; auto-detected when None.
        output_path (str): optional path to save the transcript text.

    Returns:
        dict | None: the Whisper result on success, None on any failure.
    """
    started_at = time.time()

    # Guard clauses: the input file must exist and ffmpeg must be installed.
    if not os.path.exists(video_path):
        print(f"错误: 视频文件 '{video_path}' 不存在")
        return None

    if not check_ffmpeg():
        print("错误: 未找到ffmpeg，请先安装ffmpeg")
        print("访问 https://ffmpeg.org/download.html 下载并安装")
        print("或者在Windows上可以访问 https://www.gyan.dev/ffmpeg/builds/ 下载预编译版本")
        return None

    # Temporary audio file lives next to the video; removed in `finally`.
    base_name = os.path.splitext(os.path.basename(video_path))[0]
    temp_audio = os.path.join(os.path.dirname(video_path),
                              f"{base_name}_temp_audio.mp3")

    try:
        # Step 1: pull the audio track out of the video.
        print("正在从视频中提取音频...")
        extraction_began = time.time()
        if not extract_audio_with_ffmpeg(video_path, temp_audio):
            return None
        print(f"音频提取完成! 耗时: {time.time() - extraction_began:.2f} 秒")

        # Step 2: run Whisper over the extracted audio.
        print("开始音频转录...")
        transcription = transcribe_audio_with_whisper(temp_audio, model_size, language)

        # Step 3: persist the transcript, or dump it to the console.
        if output_path:
            with open(output_path, 'w', encoding='utf-8') as out_file:
                out_file.write(transcription['text'])
            print(f"转录文本已保存至: {output_path}")
        else:
            divider = "=" * 60
            print("\n" + divider)
            print("转录结果:")
            print(divider)
            print(transcription['text'])
            print(divider)

            # Per-segment timestamps, when Whisper provides them.
            print("\n详细时间戳信息:")
            print("-" * 40)
            if 'segments' in transcription:
                for segment in transcription['segments']:
                    print(f"[{segment['start']:>6.2f}s -> "
                          f"{segment['end']:>6.2f}s] {segment['text']}")

        return transcription

    except Exception as e:
        print(f"处理失败: {e}")
        return None
    finally:
        # Best-effort cleanup of the temporary audio file.
        if os.path.exists(temp_audio):
            try:
                os.remove(temp_audio)
                print(f"\n已清理临时文件: {temp_audio}")
            except Exception as e:
                print(f"清理临时文件失败: {e}")

        print(f"总运行时间: {time.time() - started_at:.2f} 秒")

def main():
    """Parse CLI arguments and run the video-to-text pipeline."""
    arg_parser = argparse.ArgumentParser(
        description="使用Whisper将MP4视频中的音频提取成文本")
    arg_parser.add_argument("video_path", help="输入的视频文件路径")
    arg_parser.add_argument(
        "-m", "--model", default="base",
        choices=["tiny", "base", "small", "medium", "large", "large-v2", "large-v3"],
        help="Whisper模型大小 (默认: base)")
    arg_parser.add_argument("-l", "--language", help="音频语言 (默认: 自动检测)")
    arg_parser.add_argument("-o", "--output", help="输出文本文件路径")

    options = arg_parser.parse_args()

    process_video_to_text(
        video_path=options.video_path,
        model_size=options.model,
        language=options.language,
        output_path=options.output,
    )

# Simple usage examples
def example_usage():
    """Print command-line usage examples for this script."""
    print('''
使用示例:

1. 基本用法:
   python video_to_text.py video.mp4

2. 使用大型模型以获得更好的准确性:
   python video_to_text.py video.mp4 -m large

3. 指定语言（例如中文）:
   python video_to_text.py video.mp4 -l zh

4. 保存结果到文件:
   python video_to_text.py video.mp4 -o transcript.txt

5. 使用medium模型并指定中文输出到文件:
   python video_to_text.py video.mp4 -m medium -l zh -o result.txt
    ''')

if __name__ == "__main__":
    # Show usage examples when invoked with no CLI arguments.
    example_usage() if len(sys.argv) == 1 else main()
