#!/usr/bin/env python3
import sys
import json
import os
import time
import threading
import signal
import psutil

try:
    import whisper
    import torch
    import gc
except ImportError as e:
    print(f"Error: Missing required packages. Please install: pip install openai-whisper torch")
    sys.exit(1)

# Global model cache (one model kept resident between requests)
_cached_model = None
_cached_model_size = None
_cached_device = None
_process_count = 0
_start_time = time.time()

# Memory monitoring
MEMORY_THRESHOLD_MB = 3000  # 3GB memory threshold
PROCESS_COUNT_THRESHOLD = 50  # re-evaluate memory after 50 processed files

def get_device_info():
    """Pick the best available compute device and describe it.

    Returns a dict with 'device' ('cpu' or 'cuda'), a human-readable
    'device_name', and a 'memory' string ('N/A' on CPU).
    """
    info = {'device': 'cpu', 'device_name': 'CPU', 'memory': 'N/A'}

    if torch.cuda.is_available():
        props = torch.cuda.get_device_properties(0)
        info['device'] = 'cuda'
        info['device_name'] = torch.cuda.get_device_name(0)
        info['memory'] = f"{props.total_memory / 1024**3:.1f}GB"

        # Drop stale GPU allocations before the caller loads a model.
        torch.cuda.empty_cache()
        gc.collect()

    return info

def get_memory_usage():
    """Report memory usage of the current process.

    Returns a dict with resident ('rss_mb') and virtual ('vms_mb')
    sizes in megabytes plus the percent of system memory in use.
    """
    proc = psutil.Process()
    mem = proc.memory_info()
    mib = 1024 * 1024
    return {
        'rss_mb': mem.rss / mib,
        'vms_mb': mem.vms / mib,
        'percent': proc.memory_percent(),
    }

def cleanup_model():
    """Drop the cached Whisper model and reclaim its memory."""
    global _cached_model, _cached_model_size, _cached_device

    if _cached_model is None:
        return

    print("🧹 Cleaning up model memory", file=sys.stderr)
    del _cached_model
    _cached_model = None
    _cached_model_size = None
    _cached_device = None

    # Release cached CUDA allocations before the Python GC pass.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    gc.collect()
    print("✅ Model memory cleaned", file=sys.stderr)

def should_restart_due_to_memory():
    """Return True when resident memory exceeds MEMORY_THRESHOLD_MB."""
    rss = get_memory_usage()['rss_mb']
    if rss <= MEMORY_THRESHOLD_MB:
        return False
    print(f"⚠️  Memory usage too high: {rss:.1f}MB", file=sys.stderr)
    return True

def transcribe_audio_request(request_data):
    """Transcribe one audio file described by a JSON request.

    Expected keys in *request_data*:
        audio_file (str): path to the audio file (required).
        model (str): Whisper model size, default 'base'.
        language (str): language code passed to Whisper, default 'zh'.
        no_fp16 (bool): when true, disable half precision on GPU.

    Returns:
        str: the stripped transcript text.

    Raises:
        FileNotFoundError: when the audio file does not exist.
        Exception: any transcription error is re-raised after a
            best-effort GPU memory cleanup.
    """
    global _cached_model, _cached_model_size, _cached_device, _process_count

    try:
        audio_file = request_data['audio_file']
        model_size = request_data.get('model', 'base')
        language = request_data.get('language', 'zh')
        use_fp16 = not request_data.get('no_fp16', False)

        if not os.path.exists(audio_file):
            raise FileNotFoundError(f"Audio file not found: {audio_file}")

        _process_count += 1

        # Periodic memory check: if RSS is above the threshold, drop the
        # cached model so the next load starts from a clean slate.
        if _process_count % 10 == 0:
            memory_usage = get_memory_usage()
            print(f"📊 Memory usage after {_process_count} files: {memory_usage['rss_mb']:.1f}MB ({memory_usage['percent']:.1f}%)", file=sys.stderr)

            if should_restart_due_to_memory():
                cleanup_model()

        device_info = get_device_info()
        device = device_info['device']

        # Reload when no model is cached, or the requested size or the
        # selected device changed since the last load.
        need_reload = (_cached_model is None or
                       _cached_model_size != model_size or
                       _cached_device != device)

        if need_reload:
            # Free the previous model before loading a different one.
            if _cached_model is not None:
                cleanup_model()

            print(f"📦 Loading Whisper model: {model_size} on {device.upper()}", file=sys.stderr)
            start_time = time.time()

            _cached_model = whisper.load_model(model_size, device=device)
            _cached_model_size = model_size
            _cached_device = device

            load_time = time.time() - start_time
            print(f"✅ Model loaded in {load_time:.2f}s", file=sys.stderr)

            if device == 'cuda':
                print(f"💾 GPU Memory: {device_info['memory']}", file=sys.stderr)
                memory_allocated = torch.cuda.memory_allocated() / 1024**3
                print(f"📊 GPU Memory allocated: {memory_allocated:.2f}GB", file=sys.stderr)
        else:
            print(f"♻️  Using cached {model_size} model on {device.upper()}", file=sys.stderr)

        model = _cached_model

        # Optional FP16: probe that half precision works on this GPU, then
        # convert the model once. nn.Module.half() converts in place, so
        # the cached model stays converted; the marker attribute prevents
        # repeated conversion attempts on subsequent requests.
        if device == 'cuda' and use_fp16:
            try:
                test_tensor = torch.randn(1, device=device).half()
                del test_tensor

                if not hasattr(model, '_fp16_enabled'):
                    model = model.half()
                    model._fp16_enabled = True
                    print("🔧 Enabled half-precision (FP16) for faster inference", file=sys.stderr)
            except Exception:
                print("⚠️  FP16 compatibility test failed, using FP32", file=sys.stderr)
                use_fp16 = False

        print(f"🎵 Transcribing: {os.path.basename(audio_file)}", file=sys.stderr)
        transcribe_start = time.time()

        transcribe_options = {
            'language': language,
            'initial_prompt': "以下是中文对话内容，请添加合适的标点符号。",
            'condition_on_previous_text': True,
            'compression_ratio_threshold': 2.4,
            'logprob_threshold': -1.0,
            'no_speech_threshold': 0.6,
            'word_timestamps': True,
        }

        # Beam search is affordable on GPU and improves accuracy.
        if device == 'cuda':
            transcribe_options.update({
                'beam_size': 5,
            })

        result = model.transcribe(audio_file, **transcribe_options)

        transcribe_time = time.time() - transcribe_start

        # Realtime-speed stat from the end timestamp of the last segment.
        segments = result.get('segments', [])
        if segments:
            duration = segments[-1]['end']
            speed_ratio = duration / transcribe_time if transcribe_time > 0 else 0
            print(f"⚡ Transcribed {duration:.1f}s audio in {transcribe_time:.2f}s ({speed_ratio:.1f}x realtime)", file=sys.stderr)

        # Periodic GPU cache trim to keep allocator growth in check.
        if device == 'cuda' and _process_count % 5 == 0:
            torch.cuda.empty_cache()

        return result["text"].strip()

    except Exception as e:
        print(f"❌ Transcription error: {str(e)}", file=sys.stderr)

        # Best-effort GPU cleanup before propagating the error.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            gc.collect()

        # Bare raise preserves the original traceback; `raise e` would
        # re-anchor it at this line.
        raise

def signal_handler(signum, frame):
    """Handle termination signals: free the cached model, then exit 0.

    `frame` is unused but required by the signal.signal callback contract.
    """
    msg = "🛑 Received signal {}, cleaning up...".format(signum)
    print(msg, file=sys.stderr)
    cleanup_model()
    sys.exit(0)

def main():
    """Main loop: listen for JSON requests on stdin and handle them.

    Protocol: one JSON object per line with key 'audio_file' (plus
    optional 'model', 'language', 'no_fp16' and an opaque 'request_id');
    each response is one JSON line on stdout echoing 'request_id' and
    carrying either 'result' or 'error'. Diagnostics go to stderr.
    """
    # Install handlers so SIGINT/SIGTERM trigger model cleanup before exit.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    
    print("🚀 Whisper persistent service started", file=sys.stderr)
    print(f"🖥️  Process ID: {os.getpid()}", file=sys.stderr)
    
    # Log the baseline memory footprint before any model is loaded.
    initial_memory = get_memory_usage()
    print(f"📊 Initial memory usage: {initial_memory['rss_mb']:.1f}MB", file=sys.stderr)
    
    try:
        while True:
            try:
                # Read one request line; empty read means stdin closed (EOF).
                line = sys.stdin.readline()
                if not line:
                    break
                
                request_data = json.loads(line.strip())
                request_id = request_data.get('request_id')
                
                # Run the transcription; errors are reported in-band so the
                # service keeps running after a failed request.
                try:
                    result = transcribe_audio_request(request_data)
                    response = {
                        'request_id': request_id,
                        'result': result,
                        'error': None
                    }
                except Exception as e:
                    response = {
                        'request_id': request_id,
                        'result': None,
                        'error': str(e)
                    }
                
                # Send the response as a single flushed JSON line.
                print(json.dumps(response), flush=True)
                
                # Exit (for an external supervisor to restart us) once enough
                # files were processed AND memory is above the threshold.
                if (_process_count >= PROCESS_COUNT_THRESHOLD and should_restart_due_to_memory()):
                    print(f"🔄 Restarting due to high memory usage after {_process_count} files", file=sys.stderr)
                    cleanup_model()
                    break
                    
            except json.JSONDecodeError as e:
                print(f"❌ Invalid JSON request: {e}", file=sys.stderr)
                continue
            except EOFError:
                break
            except Exception as e:
                print(f"❌ Unexpected error in main loop: {e}", file=sys.stderr)
                continue
                
    except KeyboardInterrupt:
        print("🛑 Keyboard interrupt received", file=sys.stderr)
    finally:
        # Always release the model and report lifetime stats on the way out.
        cleanup_model()
        runtime = time.time() - _start_time
        print(f"📈 Service stats: {_process_count} files processed in {runtime:.1f}s", file=sys.stderr)
        print("👋 Whisper persistent service stopped", file=sys.stderr)

if __name__ == "__main__":
    main()