#!/usr/bin/env python3
import sys
import argparse
import os
import time

try:
    import whisper
    import torch
    import gc
except ImportError as e:
    # Name the missing package (previously the f-string had no placeholder and
    # `e` was discarded); diagnostics go to stderr, stdout is for the transcript.
    print(f"Error: Missing required packages ({e}). Please install: pip install openai-whisper torch", file=sys.stderr)
    sys.exit(1)

def get_device_info():
    """Detect the best available compute device.

    Returns:
        dict with keys 'device' ('cpu' or 'cuda'), 'device_name'
        (human-readable device name) and 'memory' (total GPU memory as a
        formatted string, or 'N/A' on CPU).

    When CUDA is available this also frees cached GPU allocations so the
    model load that follows starts from a clean slate.
    """
    if not torch.cuda.is_available():
        return {'device': 'cpu', 'device_name': 'CPU', 'memory': 'N/A'}

    props = torch.cuda.get_device_properties(0)
    info = {
        'device': 'cuda',
        'device_name': torch.cuda.get_device_name(0),
        'memory': f"{props.total_memory / 1024**3:.1f}GB",
    }

    # Release cached GPU memory before the (large) model gets loaded.
    torch.cuda.empty_cache()
    gc.collect()

    return info

# Module-level model cache: repeated calls to transcribe_audio() reuse the
# already-loaded model instead of paying the load cost again. The size and
# device are cached alongside so a change in either triggers a reload.
_cached_model = None
_cached_model_size = None
_cached_device = None

def transcribe_audio(audio_file, model_size="base", language="zh", use_fp16=True):
    """Transcribe an audio file with a locally loaded Whisper model.

    Uses GPU acceleration when available and caches the loaded model in
    module globals so repeated calls skip the slow model load. The
    transcript text is printed to stdout; all progress/diagnostic output
    goes to stderr. Exits the process with status 1 on any error.

    Args:
        audio_file: Path to the audio file to transcribe.
        model_size: Whisper model size (e.g. tiny, base, small, medium, large).
        language: Language code passed to Whisper (default "zh").
        use_fp16: Use half-precision inference on GPU; set to False if FP16
            causes problems on your hardware.
    """
    global _cached_model, _cached_model_size, _cached_device

    try:
        if not os.path.exists(audio_file):
            raise FileNotFoundError(f"Audio file not found: {audio_file}")

        device_info = get_device_info()
        device = device_info['device']

        # Show device details only on the very first run.
        if _cached_model is None:
            print(f"🖥️  Device: {device_info['device_name']} ({device.upper()})", file=sys.stderr)
            if device == 'cuda':
                print(f"💾 GPU Memory: {device_info['memory']}", file=sys.stderr)
                print(f"🚀 CUDA Version: {torch.version.cuda}", file=sys.stderr)

        # Reload when nothing is cached yet, or the requested size/device
        # differs from what the cache holds.
        need_reload = (_cached_model is None or
                       _cached_model_size != model_size or
                       _cached_device != device)

        if need_reload:
            # Drop the old model first so its memory can be reclaimed.
            if _cached_model is not None:
                print("🔄 Clearing previous model from memory", file=sys.stderr)
                del _cached_model
                if device == 'cuda':
                    torch.cuda.empty_cache()
                gc.collect()

            print(f"📦 Loading Whisper model: {model_size}", file=sys.stderr)
            start_time = time.time()

            _cached_model = whisper.load_model(model_size, device=device)
            _cached_model_size = model_size
            _cached_device = device

            load_time = time.time() - start_time
            print(f"✅ Model loaded in {load_time:.2f}s", file=sys.stderr)
        else:
            print(f"♻️  Using cached model: {model_size}", file=sys.stderr)

        model = _cached_model

        if device == 'cuda':
            memory_allocated = torch.cuda.memory_allocated() / 1024**3
            print(f"📊 GPU Memory allocated: {memory_allocated:.2f}GB", file=sys.stderr)

            if use_fp16:
                try:
                    # Probe FP16 support before converting the whole model.
                    test_tensor = torch.randn(1, device=device).half()
                    del test_tensor

                    model = model.half()
                    print("🔧 Enabled half-precision (FP16) for faster inference", file=sys.stderr)
                except Exception as fp16_error:
                    print(f"⚠️  FP16 compatibility test failed: {str(fp16_error)[:100]}...", file=sys.stderr)
                    print("🔧 Fallback to full-precision (FP32) mode", file=sys.stderr)
                    use_fp16 = False
            else:
                # BUGFIX: model.half() converts the cached nn.Module in place,
                # so a use_fp16=False call after a use_fp16=True call would
                # otherwise silently keep running in FP16. Restore FP32 here.
                if next(model.parameters()).dtype == torch.float16:
                    model = model.float()
                print("🔧 Using full-precision (FP32) mode", file=sys.stderr)

        print(f"🎵 Transcribing: {os.path.basename(audio_file)}", file=sys.stderr)
        transcribe_start = time.time()

        transcribe_options = {
            'language': language,
            # Prompt nudges Whisper to emit punctuation.
            # NOTE(review): prompt is Chinese-specific even when another
            # language code is passed — confirm whether that is intended.
            'initial_prompt': "以下是中文对话内容，请添加合适的标点符号，包括逗号、句号、问号、感叹号等。",
            'condition_on_previous_text': True,   # condition on prior text for coherence
            'compression_ratio_threshold': 2.4,   # reject highly repetitive (compressible) output
            'logprob_threshold': -1.0,            # reject low-confidence segments
            'no_speech_threshold': 0.6,           # silence-detection threshold
            'word_timestamps': True,              # word-level timestamps help punctuation placement
        }

        # GPU-only option (precision is handled above via model.half(),
        # not via transcribe's fp16 flag).
        if device == 'cuda':
            transcribe_options['beam_size'] = 5  # beam-search width

        result = model.transcribe(audio_file, **transcribe_options)

        transcribe_time = time.time() - transcribe_start

        # Approximate the audio duration from the last segment's end time.
        segments = result.get('segments', [])
        if segments:
            duration = segments[-1]['end']
            speed_ratio = duration / transcribe_time if transcribe_time > 0 else 0
            print(f"⚡ Transcription completed in {transcribe_time:.2f}s", file=sys.stderr)
            print(f"📏 Audio duration: {duration:.2f}s", file=sys.stderr)
            print(f"🏃 Speed ratio: {speed_ratio:.1f}x realtime", file=sys.stderr)

        if device == 'cuda':
            torch.cuda.empty_cache()
            gc.collect()

        # The transcript itself is the only thing written to stdout.
        print(result["text"])

    except Exception as e:
        print(f"Error during transcription: {str(e)}", file=sys.stderr)

        # Free GPU memory even on failure.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            gc.collect()

        sys.exit(1)

if __name__ == "__main__":
    # CLI entry point: parse arguments and hand off to transcribe_audio().
    arg_parser = argparse.ArgumentParser(
        description="Transcribe audio using local Whisper model with GPU acceleration"
    )
    arg_parser.add_argument("audio_file", help="Path to audio file")
    arg_parser.add_argument("--model", default="base",
                            help="Whisper model size (tiny, base, small, medium, large)")
    arg_parser.add_argument("--language", default="zh", help="Language code")
    arg_parser.add_argument("--no-fp16", action="store_true",
                            help="Disable half-precision (FP16) mode")

    cli = arg_parser.parse_args()

    # FP16 is on by default; --no-fp16 switches it off.
    transcribe_audio(cli.audio_file, cli.model, cli.language, not cli.no_fp16)