#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Whisper RTX 4090优化语音识别脚本
专门针对RTX 4090优化，充分利用24GB显存和sm_89计算能力
支持MP3音频文件 - GPU稳定版本 + 错误修复版
"""

import gc
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
import time
import warnings

# System stream-encoding setup.
if sys.platform.startswith('win'):
    # Force UTF-8 on Windows so Chinese text prints correctly. Export
    # PYTHONIOENCODING first: it only affects *child* Python processes,
    # and the original set it after already replacing the streams.
    os.environ['PYTHONIOENCODING'] = 'utf-8'
    try:
        # Python 3.7+: reconfigure in place, keeping existing references
        # to sys.stdout/sys.stderr valid.
        sys.stdout.reconfigure(encoding='utf-8')
        sys.stderr.reconfigure(encoding='utf-8')
    except AttributeError:
        # Fallback for wrapped/older streams without reconfigure().
        import codecs
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())
        sys.stderr = codecs.getwriter('utf-8')(sys.stderr.detach())

# Silence every warning: stdout must carry only the JSON result.
warnings.filterwarnings("ignore")

# Pin math libraries to one thread to keep memory usage predictable.
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'

# 重定向所有输出到stderr，只保留JSON输出到stdout
def log_to_stderr(message):
    """Write a log line to stderr so stdout stays reserved for JSON output.

    Best-effort: stream failures (closed pipe, encoding errors) are
    swallowed because logging must never crash the recognition pipeline.
    """
    try:
        print(message, file=sys.stderr)
        sys.stderr.flush()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; everything else is deliberately ignored.
        pass

def safe_import_torch():
    """Import and return the torch module, or None when it is unavailable."""
    try:
        import torch
    except Exception as e:
        log_to_stderr(f"[ERROR] 导入torch失败: {e}")
        return None
    return torch

def safe_import_whisper():
    """Import and return the whisper module, or None when it is unavailable."""
    try:
        import whisper
    except Exception as e:
        log_to_stderr(f"[ERROR] 导入whisper失败: {e}")
        return None
    return whisper

def clear_whisper_cache():
    """Delete Whisper's downloaded-model caches to recover from corruption.

    A partially-downloaded model or tokenizer file makes
    ``whisper.load_model`` fail with tiktoken / "Encoder and decoder"
    errors; wiping the cache forces a clean re-download on the next load.

    Fixes two defects in the original: the function was defined twice
    (the second definition shadowed the first), and the "primary" path
    accessed ``whisper._download.whisper._CACHE_DIR``, an attribute that
    does not exist, so it raised AttributeError on every call.

    Returns:
        bool: True when the sweep completed (even if nothing existed),
        False when an unexpected error occurred.
    """
    try:
        user_home = os.path.expanduser("~")
        # Whisper downloads into $XDG_CACHE_HOME/whisper, defaulting to
        # ~/.cache/whisper; the remaining paths cover Windows and legacy
        # layouts. Duplicates are harmless (second os.path.exists is False
        # after removal).
        candidates = [
            os.path.join(
                os.getenv("XDG_CACHE_HOME", os.path.join(user_home, ".cache")),
                "whisper",
            ),
            os.path.join(user_home, ".cache", "whisper"),
            os.path.join(user_home, "AppData", "Local", "whisper"),
            os.path.join(user_home, ".whisper"),
        ]

        removed_any = False
        for cache_path in candidates:
            if os.path.exists(cache_path):
                log_to_stderr(f"[CACHE] 清理Whisper缓存: {cache_path}")
                shutil.rmtree(cache_path, ignore_errors=True)
                removed_any = True

        if removed_any:
            time.sleep(2)  # give the filesystem a moment to settle
        return True
    except Exception as e:
        log_to_stderr(f"[WARNING] 清理缓存失败: {e}")
        return False

def check_gpu_memory():
    """Probe GPU memory pressure.

    Returns:
        bool: True when a CUDA GPU is available and less than 80% of its
        memory is allocated; False when no GPU is usable, probing fails,
        or usage is too high (in which case the CUDA cache is flushed
        first so a retry or CPU fallback starts from a cleaner state).
    """
    try:
        torch = safe_import_torch()
        if torch and torch.cuda.is_available():
            gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
            allocated = torch.cuda.memory_allocated(0) / 1024**3
            cached = torch.cuda.memory_reserved(0) / 1024**3

            log_to_stderr(f"[GPU] 显存状态: 总计 {gpu_memory:.1f}GB, 已分配 {allocated:.1f}GB, 已缓存 {cached:.1f}GB")

            # More than 80% allocated: flush caches and report unhealthy
            # so the caller can fall back to CPU.
            if allocated > gpu_memory * 0.8:
                log_to_stderr("[GPU] 显存使用过高，清理缓存...")
                torch.cuda.empty_cache()
                gc.collect()
                return False
            return True
    except Exception:
        # Narrowed from a bare `except:`; any probing error is treated
        # as "GPU not usable".
        pass
    return False

def force_gpu_cleanup():
    """Forcefully release cached CUDA memory; no-op when GPU is unavailable."""
    try:
        torch = safe_import_torch()
        if torch and torch.cuda.is_available():
            log_to_stderr("[GPU] 强制清理GPU内存...")
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
            gc.collect()
            time.sleep(1)  # let the driver finish reclaiming memory
    except Exception:
        # Narrowed from a bare `except:`; cleanup is best-effort.
        pass

def optimize_mp3_audio(mp3_path, wav_path):
    """Convert an MP3 file to a WAV optimized for speech recognition.

    Produces a 16 kHz mono WAV (Whisper's expected input) and band-passes
    200 Hz – 8 kHz to keep the speech band and drop rumble/hiss.

    Args:
        mp3_path: source MP3 path.
        wav_path: destination WAV path (overwritten if present).

    Returns:
        bool: True on success, False on timeout or conversion failure.
    """
    cmd = [
        'ffmpeg', '-y', '-i', mp3_path,
        '-ar', '16000',        # 16 kHz sample rate, recommended for Whisper
        '-ac', '1',            # mono: halves the compute
        '-f', 'wav',           # WAV container
        '-loglevel', 'error',  # errors only, so captured stderr is useful
        '-threads', '1',       # single thread to bound memory use
        '-af', 'highpass=f=200,lowpass=f=8000',  # speech band-pass filter
        wav_path
    ]
    try:
        subprocess.run(cmd, check=True, capture_output=True, timeout=30)
        return True
    except subprocess.TimeoutExpired:
        log_to_stderr("[ERROR] 音频转换超时")
        return False
    except subprocess.CalledProcessError as e:
        # Surface ffmpeg's own diagnostics instead of just the exit code.
        detail = (e.stderr or b'').decode('utf-8', errors='replace').strip()
        log_to_stderr(f"[ERROR] 音频转换失败: {e}" + (f" | {detail}" if detail else ""))
        return False
    except Exception as e:
        log_to_stderr(f"[ERROR] 音频转换失败: {e}")
        return False

def _fallback_to_cpu(whisper):
    """Load the medium model on CPU, record device globals; return model or None."""
    global _model, _device, _device_name
    log_to_stderr("[GPU] 降级到CPU模式...")
    _model = load_model_with_retry(whisper, "cpu")
    if _model:
        _device = "cpu"
        _device_name = "CPU"
    return _model


def load_model_rtx4090():
    """Load the Whisper medium model, preferring CUDA with CPU fallback.

    The loaded model and its device are cached in module globals
    (``_model``, ``_device``, ``_device_name``) so repeated calls reuse
    the same instance. Unlike the original, a failed load is NOT cached
    forever: a later call retries instead of permanently returning None.

    Returns:
        The loaded whisper model, or None when loading failed everywhere.
    """
    global _model, _device, _device_name

    if globals().get('_model') is not None:
        return _model

    try:
        torch = safe_import_torch()
        whisper = safe_import_whisper()
        if torch is None or whisper is None:
            return None

        # Start from a clean GPU state before any allocation.
        force_gpu_cleanup()

        if not (torch.cuda.is_available() and torch.cuda.device_count() > 0):
            log_to_stderr("[CPU] GPU不可用，使用CPU模式")
            return _fallback_to_cpu(whisper)

        gpu_name = torch.cuda.get_device_name(0)
        log_to_stderr(f"[GPU] 检测到GPU: {gpu_name}")

        # Too little free VRAM: don't risk an OOM during model load.
        if not check_gpu_memory():
            log_to_stderr("[GPU] 显存不足，尝试CPU模式...")
            return _fallback_to_cpu(whisper)

        # RTX 4090 tuning: autotuned cuDNN kernels, capped memory fraction.
        torch.cuda.set_device(0)
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
        torch.cuda.set_per_process_memory_fraction(0.7)  # conservative: 70% of VRAM

        # Gentle warm-up: a few growing allocations instead of one large
        # one, so a fragile driver state fails early and cheaply.
        log_to_stderr("[GPU] 预热GPU...")
        try:
            for size in (100, 200, 300):
                dummy = torch.randn(size, size).cuda()
                torch.cuda.synchronize()
                del dummy
                torch.cuda.empty_cache()
                time.sleep(0.1)
        except Exception as e:
            log_to_stderr(f"[WARNING] GPU预热失败: {e}")
            return _fallback_to_cpu(whisper)

        log_to_stderr("[GPU] 加载Whisper medium模型到GPU...")
        try:
            _model = load_model_with_retry(whisper, "cuda")
        except Exception as e:
            log_to_stderr(f"[ERROR] GPU模型加载失败: {e}")
            return _fallback_to_cpu(whisper)

        if _model:
            _device = "cuda"
            _device_name = gpu_name
            log_to_stderr(f"[GPU] 模型成功加载到GPU: {gpu_name}")
            return _model
        return _fallback_to_cpu(whisper)

    except Exception as e:
        log_to_stderr(f"[ERROR] 模型加载失败: {e}")
        return None

def load_model_with_retry(whisper, device, max_retries=3):
    """Load the Whisper "medium" model with retries, healing cache corruption.

    Corrupt downloads typically surface as tiktoken / "Encoder and decoder"
    errors; the cache is wiped before each retry so the next attempt
    re-downloads clean files. The original cleared the cache twice between
    attempts (once in the except handler, again at the top of the next
    attempt) and even after the final failure; both redundancies are gone.

    Args:
        whisper: the imported whisper module.
        device: target device string ("cuda" or "cpu").
        max_retries: total number of attempts before giving up.

    Returns:
        The loaded model, or None after ``max_retries`` failures.
    """
    for attempt in range(max_retries):
        try:
            log_to_stderr(f"[MODEL] 尝试加载模型 (第{attempt + 1}次) 到 {device}...")

            # Every retry (i.e. not the first attempt) starts from a
            # clean cache.
            if attempt > 0:
                log_to_stderr("[CACHE] 清理模型缓存...")
                clear_whisper_cache()
                time.sleep(2)

            model = whisper.load_model("medium", device=device)
            log_to_stderr(f"[SUCCESS] 模型加载成功到 {device}")
            return model

        except Exception as e:
            log_to_stderr(f"[ERROR] 第{attempt + 1}次加载失败: {e}")

            # Final attempt failed: no point clearing caches or sleeping.
            if attempt == max_retries - 1:
                log_to_stderr(f"[FATAL] 模型加载失败，已尝试{max_retries}次")
                return None

            # Known cache-corruption signatures get an immediate extra
            # settle delay on top of the pre-retry cache wipe.
            if "tiktoken" in str(e) or "Encoder and decoder" in str(e):
                log_to_stderr("[FIX] 检测到tiktoken错误，清理缓存...")
                time.sleep(3)

            time.sleep(2)  # back off before retrying

    return None

def transcribe_rtx4090(model, audio_path):
    """Run Whisper transcription tuned for the selected device.

    Uses FP16 on CUDA (fast on RTX 4090) and FP32 on CPU; all of
    Whisper's progress output is captured so stdout stays pure JSON.

    Fixes two latent NameErrors in the original: it referenced an
    un-imported ``torch`` (so the CUDA sync silently never ran) and the
    bare global ``_device`` (crashing the whole call when unset).

    Args:
        model: a loaded whisper model.
        audio_path: path to the audio file to transcribe.

    Returns:
        Whisper's result dict with an added ``processing_time`` entry
        (seconds, rounded to 2 decimals), or None on failure.
    """
    try:
        import io
        import contextlib

        # _device is set by load_model_rtx4090; default to CPU settings
        # when the model was obtained some other way.
        device = globals().get('_device', 'cpu')

        start_time = time.time()

        # Swallow Whisper's progress bars / warnings entirely.
        sink = io.StringIO()
        with contextlib.redirect_stdout(sink), contextlib.redirect_stderr(sink):
            result = model.transcribe(
                audio_path,
                language="zh",
                verbose=False,
                temperature=0.0,
                fp16=(device == "cuda"),  # FP16 only where hardware supports it
                compression_ratio_threshold=2.4,
                logprob_threshold=-1.0,
                no_speech_threshold=0.6,
                condition_on_previous_text=True,
                initial_prompt="这是一段中文语音。"
            )

            if device == "cuda":
                # Make sure GPU work is finished before timing stops.
                torch = safe_import_torch()
                if torch is not None:
                    try:
                        torch.cuda.synchronize()
                    except Exception:
                        pass

        processing_time = time.time() - start_time

        if result:
            result['processing_time'] = round(processing_time, 2)

        return result

    except Exception as e:
        log_to_stderr(f"[ERROR] 识别失败: {e}")
        return None

def server_loop():
    """Resident server mode: load the model once, then serve many requests.

    Reads one JSON request per stdin line ({"id": ..., "path": ...}) and
    writes one JSON response per stdout line ({"id": ..., "results": [...]}),
    avoiding the model-load latency of one-shot invocations. Errors in a
    single request are reported in its response without stopping the loop.
    """
    try:
        # Preload the model once up front.
        log_to_stderr("[SERVER] 正在预加载Whisper模型...")
        model = load_model_rtx4090()
        if model is None:
            log_to_stderr("[SERVER] 模型加载失败")
            return
        log_to_stderr("[SERVER] 模型加载完成，等待请求...")

        # One request per stdin line, JSON format: {"id": "...", "path": "..."}
        for line in sys.stdin:
            line = line.strip()
            if not line:
                continue
            try:
                request = json.loads(line)
                req_id = request.get("id")
                audio_path = request.get("path")
                if not audio_path or not os.path.exists(audio_path):
                    response = {"id": req_id, "results": [{"error": "文件不存在"}]}
                    print(json.dumps(response, ensure_ascii=False), flush=True)
                    continue

                # MP3 input: convert to a speech-optimized WAV first.
                wav_path = None
                audio_path_for_asr = audio_path
                if audio_path.lower().endswith('.mp3'):
                    wav_path = audio_path + '.optimized.wav'
                    if not optimize_mp3_audio(audio_path, wav_path):
                        response = {"id": req_id, "results": [{"error": "MP3转换失败"}]}
                        print(json.dumps(response, ensure_ascii=False), flush=True)
                        continue
                    audio_path_for_asr = wav_path

                result = transcribe_rtx4090(model, audio_path_for_asr)
                if result and 'text' in result and result['text'].strip():
                    text = result['text'].strip()
                    output = {
                        "text": text,
                        "device": _device,
                        "device_name": _device_name,
                        "processing_time": result.get('processing_time', 0),
                        "audio_format": "MP3" if audio_path.lower().endswith('.mp3') else os.path.splitext(audio_path)[1].lstrip('.').upper(),
                        "model": "Whisper Medium"
                    }
                    response = {"id": req_id, "results": [output]}
                else:
                    response = {"id": req_id, "results": []}

                print(json.dumps(response, ensure_ascii=False), flush=True)
            except Exception as e:
                # Report the error for this request but keep the server alive.
                try:
                    req_id = None
                    try:
                        req_id = json.loads(line).get("id")
                    except Exception:
                        pass
                    error_resp = {"id": req_id, "results": [{"error": f"识别失败: {str(e)}"}]}
                    print(json.dumps(error_resp, ensure_ascii=False), flush=True)
                except Exception:
                    pass
            finally:
                # Best-effort removal of this request's temporary WAV.
                # 'wav_path' may be missing from locals() when parsing
                # failed before it was assigned on the first iteration.
                try:
                    if 'wav_path' in locals() and wav_path and os.path.exists(wav_path):
                        os.remove(wav_path)
                except Exception:
                    pass
    except Exception as e:
        log_to_stderr(f"[SERVER] 致命错误: {e}")


def main():
    """One-shot CLI entry point: transcribe sys.argv[1], print JSON to stdout.

    All human-readable logging goes to stderr; stdout carries exactly one
    JSON array (results or a single error object). Exits 1 on failure.
    """
    try:
        if len(sys.argv) < 2:
            log_to_stderr("使用方法: python whisper_rtx4090.py <MP3音频文件>")
            print(json.dumps([{"error": "参数错误"}], ensure_ascii=False))
            sys.exit(1)

        audio_path = sys.argv[1]

        if not os.path.exists(audio_path):
            # Error JSON goes to stdout so the caller can parse it.
            print(json.dumps([{"error": "文件不存在"}], ensure_ascii=False))
            sys.exit(1)

        log_to_stderr(f"[AUDIO] 处理MP3音频文件: {audio_path}")

        # MP3 input: convert to a speech-optimized WAV first.
        wav_path = None
        if audio_path.lower().endswith('.mp3'):
            wav_path = audio_path + '.optimized.wav'
            log_to_stderr("[CONVERT] 转换MP3到优化WAV格式...")
            if not optimize_mp3_audio(audio_path, wav_path):
                print(json.dumps([{"error": "MP3转换失败"}], ensure_ascii=False))
                sys.exit(1)
            audio_path_for_asr = wav_path
            log_to_stderr(f"[CONVERT] 转换完成: {wav_path}")
        else:
            log_to_stderr("[WARNING] 检测到非MP3文件，直接处理...")
            audio_path_for_asr = audio_path

        # Load (or reuse the cached) model.
        log_to_stderr("[MODEL] 加载Whisper模型...")
        model = load_model_rtx4090()
        if model is None:
            print(json.dumps([{"error": "模型加载失败"}], ensure_ascii=False))
            sys.exit(1)

        try:
            log_to_stderr("[RECOGNIZE] 开始语音识别...")
            result = transcribe_rtx4090(model, audio_path_for_asr)

            if result and 'text' in result:
                text = result['text'].strip()
                if text:
                    # Result plus device info, emitted as a one-element
                    # JSON array on stdout.
                    output = {
                        "text": text,
                        "device": _device,
                        "device_name": _device_name,
                        "processing_time": result.get('processing_time', 0),
                        # Report the actual extension instead of the
                        # original's hardcoded "MP3" (non-MP3 inputs are
                        # accepted above); matches server_loop's behavior.
                        "audio_format": "MP3" if audio_path.lower().endswith('.mp3') else os.path.splitext(audio_path)[1].lstrip('.').upper(),
                        "model": "Whisper Medium"
                    }
                    log_to_stderr(f"[SUCCESS] 识别完成! 用时: {output['processing_time']}秒")
                    log_to_stderr(f"[DEVICE] 使用设备: {output['device_name']}")
                    log_to_stderr(f"[TEXT] 识别文本: {text}")

                    # Compact separators keep the single stdout line small.
                    json_output = json.dumps([output], ensure_ascii=False, separators=(',', ':'))
                    print(json_output)
                else:
                    log_to_stderr("[WARNING] 未识别到语音内容")
                    print(json.dumps([], ensure_ascii=False))
            else:
                log_to_stderr("[ERROR] 识别失败")
                print(json.dumps([], ensure_ascii=False))

        except Exception as e:
            log_to_stderr(f"[ERROR] 识别异常: {str(e)}")
            print(json.dumps([{"error": f"识别失败: {str(e)}"}], ensure_ascii=False))

        finally:
            # Remove the temporary converted WAV, if any.
            if wav_path and os.path.exists(wav_path):
                try:
                    os.remove(wav_path)
                    log_to_stderr(f"[CLEANUP] 已清理临时文件: {wav_path}")
                except Exception:
                    pass

            # Release GPU memory. The original tested `'torch' in globals()`,
            # which is never true here (torch is only imported inside helper
            # functions), so this cleanup silently never ran.
            try:
                torch = safe_import_torch()
                if torch is not None and torch.cuda.is_available():
                    torch.cuda.empty_cache()
                    gc.collect()
            except Exception:
                pass

    except Exception as e:
        log_to_stderr(f"[FATAL] 程序异常: {str(e)}")
        print(json.dumps([{"error": f"程序异常: {str(e)}"}], ensure_ascii=False))
        sys.exit(1)

if __name__ == "__main__":
    # "--server"/"-s" starts the long-lived stdin/stdout server; any
    # other invocation performs a one-shot transcription of sys.argv[1].
    wants_server = "--server" in sys.argv or "-s" in sys.argv
    if wants_server:
        server_loop()
    else:
        main()
