#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
增强版语音识别脚本 - 专门解决Windows编码问题
"""

import sys
import os

# Set up encoding before importing any other modules
def setup_encoding():
    """Force UTF-8 on the standard streams to prevent garbled output.

    Must run before anything is written to stdout/stderr.  On Windows it
    additionally exports encoding-related environment variables and tries
    to switch the console code page to UTF-8.  Never raises.
    """
    def _utf8_wrap(stream):
        # Prefer in-place reconfigure (Python 3.7+): it keeps the original
        # stream object alive for code that already holds a reference to it,
        # unlike rebinding sys.stdout/sys.stderr to a new wrapper.
        if hasattr(stream, 'reconfigure'):
            try:
                stream.reconfigure(encoding='utf-8')
                return stream
            except (ValueError, OSError):
                pass  # detached/closed stream; fall through to rewrapping
        if hasattr(stream, 'buffer'):
            import io
            return io.TextIOWrapper(stream.buffer, encoding='utf-8')
        return stream  # no byte buffer available (already wrapped/captured)

    sys.stdout = _utf8_wrap(sys.stdout)
    sys.stderr = _utf8_wrap(sys.stderr)

    if sys.platform.startswith('win'):
        # Make child interpreters inherit UTF-8 I/O as well.
        os.environ['PYTHONIOENCODING'] = 'utf-8'
        # NOTE(review): CPython treats PYTHONLEGACYWINDOWSSTDIO as a boolean
        # flag; the original value 'utf-8' is preserved for compatibility.
        os.environ['PYTHONLEGACYWINDOWSSTDIO'] = 'utf-8'

        # Best effort: switch the console code page to UTF-8 (65001).
        try:
            import subprocess
            subprocess.run(['chcp', '65001'], shell=True, capture_output=True)
        except Exception:
            pass  # cosmetic only; never let console tweaks abort startup

# Apply the encoding fix immediately, before any output is produced
setup_encoding()

import json
import logging
import subprocess
import soundfile as sf
import numpy as np
import time
import warnings

# Silence Whisper-related warnings that would otherwise pollute stderr
warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
warnings.filterwarnings("ignore", category=UserWarning, module="whisper")
warnings.filterwarnings("ignore", category=FutureWarning)

def safe_print(message, file=sys.stderr):
    """Print *message* to *file* (stderr by default), degrading gracefully.

    Falls back to an ASCII-only rendering on UnicodeEncodeError so that
    diagnostic logging can never crash the recognition pipeline.
    """
    # Normalize to str up front: the original code called message.encode()
    # in the fallback, which raised AttributeError for non-str inputs.
    text = message if isinstance(message, str) else str(message)
    try:
        print(text, file=file)
    except UnicodeEncodeError:
        # The stream's encoding cannot represent the text; drop the
        # offending characters rather than fail.
        try:
            print(text.encode('ascii', 'ignore').decode('ascii'), file=file)
        except Exception:
            print("Encoding error", file=file)

def convert_webm_to_wav(webm_path, wav_path):
    """Convert *webm_path* to a 16 kHz mono WAV at *wav_path* via ffmpeg.

    Returns True on success, False on any failure (conversion error or
    ffmpeg not installed); never raises.
    """
    cmd = [
        'ffmpeg', '-y', '-i', webm_path,
        '-ar', '16000', '-ac', '1', '-f', 'wav',
        '-loglevel', 'error',  # keep ffmpeg quiet except for real errors
        '-threads', '1',  # single-threaded for predictable CPU usage
        wav_path
    ]
    try:
        subprocess.run(cmd, check=True, capture_output=True)
        return True
    except subprocess.CalledProcessError as e:
        # capture_output swallowed ffmpeg's stderr; surface it so the
        # diagnostic actually says what went wrong.
        detail = e.stderr.decode('utf-8', 'replace').strip() if e.stderr else str(e)
        safe_print(f"音频转换失败: {detail}")
        return False
    except FileNotFoundError:
        # ffmpeg binary is not on PATH; previously this crashed uncaught.
        safe_print("音频转换失败: 未找到 ffmpeg，请先安装 ffmpeg")
        return False

def load_model_once():
    """Return a process-wide Whisper model, loading it on first use.

    The model is cached in the module-level global ``_model_instance`` so
    repeated calls never reload it.  Returns None when the whisper package
    is missing or loading fails.
    """
    global _model_instance
    cached = globals().get('_model_instance')
    if cached is not None:
        return cached

    safe_print("正在加载语音识别模型...")
    try:
        import whisper
    except ImportError:
        safe_print("错误: 请先安装whisper库: pip install openai-whisper")
        return None

    try:
        # Configure the environment before loading to suppress warnings
        # and keep everything on the CPU.
        os.environ['WHISPER_CPU_FP16_WARNING'] = '0'
        os.environ['CUDA_VISIBLE_DEVICES'] = ''  # force CPU
        os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # avoid parallelism warning

        # The "base" model is faster and more stable than larger variants.
        _model_instance = whisper.load_model("base")
    except Exception as e:
        safe_print(f"模型加载失败: {e}")
        return None

    safe_print("Whisper模型加载完成")
    return _model_instance

def main():
    """CLI entry point: recognize speech from the audio file in argv[1].

    Prints a JSON array to stdout: [{"text": ...}] on success, [] when no
    speech was recognized, or [{"error": ...}] on failure.  Exits with
    status 1 on fatal errors.  A temporary WAV created for WebM input is
    always cleaned up.
    """
    if len(sys.argv) < 2:
        safe_print("Usage: python speech_recognize_paraformer_enhanced.py <audio_path>")
        sys.exit(1)

    audio_path = sys.argv[1]

    # Fail fast when the input file does not exist.
    if not os.path.exists(audio_path):
        print(json.dumps([{"error": "音频文件不存在"}], ensure_ascii=False))
        sys.exit(1)

    # WebM input must be converted to WAV before recognition.
    ext = os.path.splitext(audio_path)[1].lower()
    wav_path = None

    if ext == '.webm':
        wav_path = audio_path + '.tmp.wav'
        if not convert_webm_to_wav(audio_path, wav_path):
            print(json.dumps([{"error": "音频转换失败"}], ensure_ascii=False))
            sys.exit(1)
        audio_path_for_asr = wav_path
    else:
        audio_path_for_asr = audio_path

    # Reuse the process-wide model instance.
    model = load_model_once()
    if model is None:
        print(json.dumps([{"error": "模型加载失败"}], ensure_ascii=False))
        sys.exit(1)

    # Defined up front: previously this was unbound at the result-file
    # write below whenever the transcription result had no 'text' key.
    recognized_text = ""

    try:
        start_time = time.time()

        # Parameters tuned for deterministic, CPU-friendly transcription.
        result = model.transcribe(
            audio_path_for_asr,
            language="zh",
            fp16=False,  # force FP32 to avoid the FP16-on-CPU warning
            verbose=False,  # reduce console output
            temperature=0.0,  # deterministic decoding
            compression_ratio_threshold=2.4,  # quality filter
            logprob_threshold=-1.0  # lower threshold to improve recall
        )

        end_time = time.time()
        processing_time = end_time - start_time

        if result and 'text' in result:
            recognized_text = result['text'].strip()

        # Emit the recognition result as JSON on stdout.
        if recognized_text:
            print(json.dumps([{"text": recognized_text}], ensure_ascii=False))
        else:
            print(json.dumps([], ensure_ascii=False))

        # Persist timing/diagnostic info; failures here are non-fatal.
        try:
            with open("whisper_result.txt", "w", encoding="utf-8") as f:
                f.write(f"处理时间: {processing_time:.2f}秒\n")
                f.write(f"音频文件: {audio_path}\n")
                f.write(f"识别结果: {recognized_text}\n")
                f.write(json.dumps(result, ensure_ascii=False, indent=2))
        except Exception as write_error:
            safe_print(f"警告: 无法写入结果文件: {write_error}")

    except Exception as e:
        error_msg = f"识别失败: {str(e)}"
        safe_print(f"错误: {error_msg}")
        print(json.dumps([{"error": error_msg}], ensure_ascii=False))

    finally:
        # Always remove the temporary WAV, even after errors.
        if wav_path and os.path.exists(wav_path):
            try:
                os.remove(wav_path)
            except OSError:
                pass  # best effort; leaving a temp file behind is harmless

if __name__ == "__main__":
    main()
