# Audio processing core module
# High-performance speech recognition service built on OpenAI Whisper

import os
import uuid
import tempfile
import asyncio
import logging
import time
from typing import Dict, Any, Optional, List, Union, Tuple
from pathlib import Path
import whisper
import torch
from pydub import AudioSegment
from fastapi import UploadFile, HTTPException
from ..core.config import settings
from ..core.monitoring import performance_monitor, ProcessingStatus
from ..utils.memory_manager import memory_manager

# Logging configuration.
# NOTE(review): basicConfig at import time mutates the process-wide root
# logger; fine for an application entry point, but consider leaving this to
# the application if this module is imported as a library.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class AudioProcessor:
    """
    High-performance audio processor.

    Provides enterprise-grade speech recognition built on OpenAI Whisper.

    Features:
    - Multiple Whisper models (tiny through large-v3)
    - Automatic device selection (CPU/GPU/MPS)
    - Batch processing support
    - Advanced parameter tuning
    - Detailed confidence analysis
    - Automatic multi-language detection
    - Audio preprocessing optimizations
    """
    
    def __init__(self):
        """
        Initialize the audio processor.

        Picks the best available compute device and defers Whisper model
        loading until first use to keep startup memory low.
        """
        # Model/device selection; the model itself is loaded lazily on
        # first use rather than here.
        self.model_name = settings.WHISPER_MODEL
        self.device = self._detect_device()
        self.model = None

        # Small LRU-style model cache keyed by model name.
        self._model_cache = {}        # name -> loaded Whisper model
        self._model_last_used = {}    # name -> last-use timestamp
        self._max_cached_models = 2   # cache capacity before eviction

        logger.info(f"音频处理器初始化 - 模型: {self.model_name}, 设备: {self.device}")

        # Make sure the working directories exist up front.
        for directory in (settings.OUTPUT_DIR, settings.TEMP_UPLOAD_DIR):
            os.makedirs(directory, exist_ok=True)
    
    def _detect_device(self) -> str:
        """
        Pick the best available compute device.

        An explicit (non-"auto") value in settings.WHISPER_DEVICE always
        wins; otherwise prefer CUDA, then Apple MPS, then CPU.

        Returns:
            One of 'cuda', 'mps' or 'cpu'.
        """
        if settings.WHISPER_DEVICE != "auto":
            return settings.WHISPER_DEVICE

        if torch.cuda.is_available():
            gpu_name = torch.cuda.get_device_name(0)
            gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
            logger.info(f"检测到CUDA设备: {gpu_name} ({gpu_memory:.1f}GB)")
            return "cuda"

        if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            logger.info("检测到Apple Silicon MPS设备")
            return "mps"

        logger.info("使用CPU设备")
        return "cpu"
    
    def _load_model(self, model_name: str) -> whisper.Whisper:
        """
        Load a Whisper model, or return it from the in-process cache.

        Keeps at most ``self._max_cached_models`` models cached, evicting the
        least-recently-used one before loading a new model.

        Args:
            model_name: Whisper model identifier; must be a key of
                settings.AVAILABLE_WHISPER_MODELS.

        Returns:
            The loaded Whisper model instance.

        Raises:
            ValueError: If ``model_name`` is not a supported model.
            HTTPException: 500 when the model fails to load.
        """
        current_time = time.time()
        
        # Cache hit: refresh the last-used timestamp and return.
        if model_name in self._model_cache:
            logger.info(f"使用缓存的模型: {model_name}")
            self._model_last_used[model_name] = current_time
            return self._model_cache[model_name]
        
        if model_name not in settings.AVAILABLE_WHISPER_MODELS:
            raise ValueError(f"不支持的模型: {model_name}. 可用模型: {list(settings.AVAILABLE_WHISPER_MODELS.keys())}")
        
        # Cache full: evict the least-recently-used model first.
        if len(self._model_cache) >= self._max_cached_models:
            self._cleanup_model_cache()
        
        logger.info(f"正在加载Whisper模型: {model_name}")
        
        try:
            # Load the model onto the selected device.
            model = whisper.load_model(
                model_name, 
                device=self.device,
                download_root=None  # use whisper's default cache directory
            )
            
            # Cache the freshly loaded model.
            self._model_cache[model_name] = model
            self._model_last_used[model_name] = current_time
            
            # The first successful load also becomes the active model.
            if self.model is None:
                self.model = model
                
            model_info = settings.AVAILABLE_WHISPER_MODELS[model_name]
            logger.info(f"模型加载完成: {model_name} ({model_info['size']}, {model_info['description']})")
            
            return model
            
        except Exception as e:
            logger.error(f"模型加载失败: {model_name} - {e}")
            raise HTTPException(
                status_code=500,
                detail=f"Whisper模型加载失败: {model_name}"
            )
    
    def _cleanup_model_cache(self):
        """
        清理模型缓存，移除最久未使用的模型以释放内存
        """
        if not self._model_cache:
            return
        
        # 找到最久未使用的模型
        oldest_model = min(self._model_last_used.items(), key=lambda x: x[1])[0]
        
        # 删除模型并释放内存
        if oldest_model in self._model_cache:
            del self._model_cache[oldest_model]
            del self._model_last_used[oldest_model]
            
            # 使用内存管理器进行清理
            memory_manager.force_garbage_collection()
            memory_manager.clear_gpu_cache()
            
            logger.info(f"已清理模型缓存: {oldest_model}")
    
    def clear_model_cache(self):
        """
        Drop every cached model and reset the active model.

        Also triggers garbage collection and GPU cache cleanup so the freed
        model memory is actually reclaimed.
        """
        # Forget the active model and every cached one.
        self.model = None
        self._model_last_used.clear()
        self._model_cache.clear()

        # Reclaim host and GPU memory.
        memory_manager.force_garbage_collection()
        memory_manager.clear_gpu_cache()

        logger.info("已清理所有模型缓存")
    
    def get_available_models(self) -> Dict[str, Dict[str, str]]:
        """
        Expose the catalogue of supported Whisper models.

        Returns:
            Mapping of model name to its descriptive info dict, straight
            from settings.
        """
        available = settings.AVAILABLE_WHISPER_MODELS
        return available
    
    def switch_model(self, model_name: str) -> bool:
        """
        Make ``model_name`` the active Whisper model.

        Args:
            model_name: Target model name.

        Returns:
            True when the switch succeeded; False otherwise (the previous
            model stays active on failure).
        """
        try:
            new_model = self._load_model(model_name)
            self.model = new_model
            self.model_name = model_name
            logger.info(f"已切换到模型: {model_name}")
        except Exception as e:
            logger.error(f"模型切换失败: {e}")
            return False
        return True
    
    async def process_audio_file(
        self, 
        file: UploadFile, 
        language: Optional[str] = None,
        model_name: Optional[str] = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Transcribe an uploaded audio file to text (memory-optimized path).

        Args:
            file: The uploaded audio file.
            language: Language code (e.g. zh, en, ja, ko; None or "auto"
                means auto-detect).
            model_name: Optional model to use for this request only; the
                previously active model is restored afterwards.
            **kwargs: Additional Whisper transcription parameters.

        Returns:
            Dict with text, confidence, metadata, segments and
            language-detection details.

        Raises:
            HTTPException: 400 for invalid input / unknown model, 500 for
                processing failures.
        """
        start_time = asyncio.get_event_loop().time()
        task_id = str(uuid.uuid4())
        
        # Monitor memory for the duration of the request.
        with memory_manager.memory_monitor(f"处理音频文件: {file.filename}"):
            # Validate filename, format and (declared) size.
            self._validate_audio_file(file)
            
            # Trigger cleanup if memory usage is already high.
            memory_manager.check_and_cleanup()
            
            # Register the task with the performance monitor.
            file_size = getattr(file, 'size', 0)
            if settings.MONITORING_ENABLED:
                performance_monitor.start_task(
                    task_id=task_id,
                    filename=file.filename,
                    file_size=file_size,
                    language=language or 'auto',
                    model=model_name or self.model_name,
                    device=self.device
                )
            
            # Temporarily switch models when the caller requested one.
            original_model = self.model_name
            if model_name and model_name != self.model_name:
                if not self.switch_model(model_name):
                    error_msg = f"无法切换到模型: {model_name}"
                    if settings.MONITORING_ENABLED:
                        performance_monitor.fail_task(task_id, error_msg)
                    raise HTTPException(status_code=400, detail=error_msg)
            
            temp_file_path = None
            try:
                # Save the upload and normalize the audio for Whisper.
                temp_file_path = await self._save_upload_file(file)
                processed_audio_path = await self._preprocess_audio(temp_file_path)
                
                # Check memory again before the expensive recognition step.
                memory_manager.check_and_cleanup()
                
                # Run Whisper speech recognition.
                result = await self._recognize_speech_with_whisper(
                    processed_audio_path, 
                    language,
                    **kwargs
                )
                
                # Extract metadata from the original (unprocessed) upload.
                metadata = await self._extract_audio_metadata(temp_file_path, file.filename)
                
                # Wall-clock time spent on this request.
                processing_time = asyncio.get_event_loop().time() - start_time
                
                # Assemble the full response payload.
                response = {
                    "text": result["text"],
                    "confidence": result["confidence"],
                    "metadata": {
                        "language": result.get("language", language or "auto"),
                        "duration": metadata.get("duration", 0.0),
                        "format": metadata.get("format", "unknown"),
                        "filename": metadata.get("filename", file.filename),
                        "model": self.model_name,
                        "device": self.device,
                        "processing_time": round(processing_time, 2),
                        "file_size": metadata.get("file_size", 0),
                        "sample_rate": metadata.get("sample_rate"),
                        "channels": metadata.get("channels"),
                        "bit_depth": metadata.get("bit_depth")
                    },
                    "segments": result.get("segments", []),
                    "language_detection": {
                        "detected_language": result.get("language_detection", {}).get("detected_language", result.get("language", language or "auto")),
                        "auto_detected": result.get("language_detection", {}).get("auto_detected", language is None or language == "auto"),
                        "confidence": result.get("language_detection", {}).get("confidence", 0.0)
                    }
                }
                
                # Mark the monitored task as completed.
                if settings.MONITORING_ENABLED:
                    performance_monitor.complete_task(
                        task_id=task_id,
                        confidence=result["confidence"],
                        text_length=len(result["text"])
                    )
                
                logger.info(
                    f"音频处理完成: {file.filename} "
                    f"({metadata['duration']:.1f}s) -> "
                    f"{len(result['text'])}字符 "
                    f"(置信度: {result['confidence']:.2f}, "
                    f"用时: {processing_time:.2f}s)"
                )
                
                return response
                
            except HTTPException as e:
                if settings.MONITORING_ENABLED:
                    performance_monitor.fail_task(task_id, str(e.detail))
                raise
            except Exception as e:
                error_msg = self._format_error_message(str(e))
                if settings.MONITORING_ENABLED:
                    performance_monitor.fail_task(task_id, error_msg)
                logger.error(f"音频处理失败: {file.filename} - {e}")
                raise HTTPException(status_code=500, detail=error_msg)
            
            finally:
                # Restore the previously active model.
                if model_name and model_name != original_model:
                    self.switch_model(original_model)
                
                # Remove temporary files created for this request.
                if temp_file_path:
                    self._cleanup_temp_files(temp_file_path)
                
                # Force a garbage-collection pass before returning.
                memory_manager.force_garbage_collection()
    
    async def process_audio_batch(
        self, 
        files: List[UploadFile], 
        language: Optional[str] = None,
        model_name: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Memory-optimized batch transcription of several audio files.

        Files are processed in sub-batches of 2 with at most 2 concurrent
        tasks; memory is cleaned up between sub-batches.

        Args:
            files: Audio files to process (at most 5).
            language: Language code applied to every file.
            model_name: Model name applied to every file.

        Returns:
            One result dict per file. Successful entries carry
            ``success=True``; failed entries carry ``success=False`` plus an
            ``error`` message.

        Raises:
            HTTPException: 400 when more than 5 files are submitted.
        """
        if len(files) > 5:  # keep batches small to limit memory pressure
            raise HTTPException(
                status_code=400,
                detail="批量处理最多支持5个文件"
            )
        
        logger.info(f"开始批量处理 {len(files)} 个音频文件")
        
        # Monitor memory for the whole batch.
        with memory_manager.memory_monitor(f"批量处理 {len(files)} 个音频文件"):
            # Strict concurrency cap: at most 2 files in flight at once.
            semaphore = asyncio.Semaphore(2)
            
            async def process_single_file(file: UploadFile) -> Dict[str, Any]:
                async with semaphore:
                    try:
                        result = await self.process_audio_file(file, language, model_name)
                        # Release per-file memory as soon as possible.
                        memory_manager.force_garbage_collection()
                        return result
                    except Exception as e:
                        # Still reclaim memory when a file fails.
                        memory_manager.force_garbage_collection()
                        return {
                            "filename": file.filename,
                            "error": str(e),
                            "success": False
                        }
            
            # Process in sub-batches to further reduce peak memory.
            batch_size = 2
            all_results = []
            
            for i in range(0, len(files), batch_size):
                batch_files = files[i:i + batch_size]
                
                # Check memory state before each sub-batch.
                memory_manager.check_and_cleanup()
                
                # Run the current sub-batch concurrently.
                tasks = [process_single_file(file) for file in batch_files]
                batch_results = await asyncio.gather(*tasks, return_exceptions=True)
                
                # Fold exceptions and results into the output list.
                for j, result in enumerate(batch_results):
                    file_index = i + j
                    if isinstance(result, Exception):
                        all_results.append({
                            "filename": files[file_index].filename,
                            "error": str(result),
                            "success": False
                        })
                    else:
                        # BUGFIX: per-file failures handled inside
                        # process_single_file already carry success=False;
                        # the old unconditional assignment overwrote that
                        # flag to True and inflated the success count.
                        result.setdefault("success", True)
                        all_results.append(result)
                
                # Force cleanup between sub-batches.
                memory_manager.force_garbage_collection()
                memory_manager.clear_gpu_cache()
            
            success_count = sum(1 for r in all_results if r.get("success", False))
            logger.info(f"批量处理完成: {success_count}/{len(files)} 成功")
            
            return all_results
    
    def _validate_audio_file(self, file: UploadFile) -> None:
        """
        Check that an upload looks like a processable audio file.

        Validates the filename, the extension against the supported formats,
        and (when the attribute is available) the declared file size.

        Args:
            file: The uploaded file.

        Raises:
            HTTPException: 400 when any validation fails.
        """
        if not file.filename:
            raise HTTPException(status_code=400, detail="文件名不能为空")

        # Extension must be one of the configured audio formats.
        ext = self._get_file_extension(file.filename)
        if ext not in settings.SUPPORTED_AUDIO_FORMATS:
            raise HTTPException(
                status_code=400, 
                detail=f"不支持的音频格式: {ext}。支持的格式: {', '.join(settings.SUPPORTED_AUDIO_FORMATS)}"
            )

        # Size check is best-effort: some upload objects expose no size.
        declared_size = getattr(file, 'size', None)
        if declared_size:
            max_size = settings.MAX_AUDIO_SIZE_MB * 1024 * 1024
            if declared_size > max_size:
                raise HTTPException(
                    status_code=400,
                    detail=f"文件大小超过限制: {declared_size / 1024 / 1024:.1f}MB > {settings.MAX_AUDIO_SIZE_MB}MB"
                )
    
    def _format_error_message(self, error_msg: str) -> str:
        """
        格式化错误消息，提供用户友好的错误描述
        
        Args:
            error_msg: 原始错误消息
            
        Returns:
            格式化后的错误消息
        """
        error_msg_lower = error_msg.lower()
        
        # 音频格式错误
        if any(keyword in error_msg_lower for keyword in [
            "invalid argument", "could not seek", "failed to read frame", 
            "invalid data found", "format not supported", "codec not found",
            "no audio", "empty audio", "duration too short"
        ]):
            return "音频文件格式不支持或文件已损坏，请使用标准的音频格式（MP3、WAV、FLAC等）"
        
        # 文件访问错误
        elif any(keyword in error_msg_lower for keyword in [
            "no such file", "permission denied", "access denied", "file not found"
        ]):
            return "音频文件访问失败，请检查文件是否存在和权限设置"
        
        # 内存/设备错误
        elif any(keyword in error_msg_lower for keyword in [
            "cuda", "memory", "out of memory", "device", "allocation"
        ]):
            return "语音识别处理失败，可能是内存不足或设备错误，请尝试使用较小的模型"
        
        # 模型错误
        elif any(keyword in error_msg_lower for keyword in [
            "model", "checkpoint", "load", "download"
        ]):
            return "Whisper模型加载或处理失败，请检查网络连接或尝试重新启动服务"
        
        # 网络错误
        elif any(keyword in error_msg_lower for keyword in [
            "connection", "timeout", "network", "download"
        ]):
            return "网络连接失败，请检查网络设置或稍后重试"
        
        # 默认错误
        else:
            return f"音频处理过程中发生错误: {error_msg[:100]}{'...' if len(error_msg) > 100 else ''}"
    
    async def _save_upload_file(self, file: UploadFile) -> str:
        """
        Persist an uploaded file into the temp-upload directory.

        The file is stored under a fresh UUID name with the original
        extension, then size-checked against the configured limit.

        Args:
            file: The uploaded file.

        Returns:
            Path of the saved temporary file.

        Raises:
            HTTPException: 400 when the upload is empty or exceeds the
                configured size limit.
        """
        file_ext = self._get_file_extension(file.filename)
        temp_file = os.path.join(
            settings.TEMP_UPLOAD_DIR, 
            f"{uuid.uuid4()}.{file_ext}"
        )
        
        # The upload directory may not exist yet on first use.
        os.makedirs(os.path.dirname(temp_file), exist_ok=True)
        
        # BUGFIX: read and validate BEFORE opening the destination; the old
        # order opened the file first, leaving a stray zero-byte temp file
        # behind whenever the upload was empty.
        content = await file.read()
        if not content:
            raise HTTPException(status_code=400, detail="上传的文件为空")
        with open(temp_file, "wb") as buffer:
            buffer.write(content)
        
        # Enforce the configured size limit; remove the file on violation.
        file_size = os.path.getsize(temp_file)
        max_size = settings.MAX_AUDIO_SIZE_MB * 1024 * 1024
        if file_size > max_size:
            os.remove(temp_file)
            raise HTTPException(
                status_code=400,
                detail=f"文件大小超过限制: {file_size / 1024 / 1024:.1f}MB > {settings.MAX_AUDIO_SIZE_MB}MB"
            )
        
        logger.debug(f"文件已保存: {temp_file} ({file_size / 1024:.1f}KB)")
        return temp_file
    
    async def _preprocess_audio(self, audio_path: str) -> str:
        """
        Memory-conscious audio preprocessing for Whisper.

        Normalizes the audio to 16 kHz mono and boosts very quiet
        recordings. Returns the original path unchanged when no processing
        is needed or when preprocessing fails (recognition then runs on the
        original file).

        Args:
            audio_path: Path of the source audio file.

        Returns:
            Path of the preprocessed WAV file, or the original path.
        """
        try:
            audio = AudioSegment.from_file(audio_path)
            
            # Guard against degenerate durations.
            duration = len(audio) / 1000.0
            if duration < 0.1:
                raise ValueError("音频时长过短，无法处理")
            if duration > 1800:  # cap at 30 minutes to bound memory use
                raise ValueError("音频时长过长，请分段处理（建议30分钟以内）")
            
            # Whisper prefers 16 kHz mono; also normalize very quiet audio.
            needs_processing = (
                audio.frame_rate != 16000 or 
                audio.channels > 1 or 
                audio.max_dBFS < -30
            )
            
            # Nothing to do: hand the original file straight to Whisper.
            if not needs_processing:
                return audio_path
            
            # Process step by step, dropping intermediates to limit memory.
            processed_audio = audio
            
            # Resample to Whisper's preferred 16 kHz.
            if audio.frame_rate != 16000:
                processed_audio = processed_audio.set_frame_rate(16000)
                logger.debug(f"采样率已调整: {audio.frame_rate}Hz -> 16000Hz")
                # Release the original audio's memory.
                if processed_audio is not audio:
                    del audio
            
            # Down-mix to mono.
            if processed_audio.channels > 1:
                temp_audio = processed_audio.set_channels(1)
                if temp_audio is not processed_audio:
                    del processed_audio
                processed_audio = temp_audio
                logger.debug(f"声道已调整: 多声道 -> 1")
            
            # Normalize only clearly quiet audio.
            if processed_audio.max_dBFS < -30:
                temp_audio = processed_audio.normalize()
                if temp_audio is not processed_audio:
                    del processed_audio
                processed_audio = temp_audio
                logger.debug("音量已标准化")
            
            # BUGFIX: the old str.replace() on the extension substituted the
            # FIRST occurrence of the extension text anywhere in the path,
            # and with an extension-less file replace("") inserted the suffix
            # between every character. Build the path from the stem instead.
            processed_path = os.path.splitext(audio_path)[0] + "_processed.wav"
            
            # Export with explicit mono/16 kHz parameters.
            processed_audio.export(
                processed_path, 
                format="wav",
                parameters=["-ac", "1", "-ar", "16000"]  # force mono 16 kHz
            )
            
            # Release the processed audio's memory.
            del processed_audio
            
            logger.debug(f"音频预处理完成: {processed_path}")
            return processed_path
            
        except Exception as e:
            logger.warning(f"音频预处理失败，使用原文件: {e}")
            return audio_path
    
    async def _extract_audio_metadata(self, audio_path: str, filename: str) -> Dict[str, Any]:
        """
        Extract detailed metadata from an audio file.

        Falls back to a minimal dict (zero duration) when the file cannot
        be decoded.

        Args:
            audio_path: Path of the audio file on disk.
            filename: Original (client-supplied) filename.

        Returns:
            Metadata dict: filename, format, duration (s), sample rate,
            channels, bit depth, file size and volume levels.
        """
        try:
            audio = AudioSegment.from_file(audio_path)
            
            return {
                "filename": filename,
                "format": self._get_file_extension(filename),
                "duration": len(audio) / 1000.0,  # pydub length is in ms
                "sample_rate": audio.frame_rate,
                "channels": audio.channels,
                "bit_depth": audio.sample_width * 8,
                "file_size": os.path.getsize(audio_path),
                "max_volume_db": audio.max_dBFS,
                # BUGFIX: AudioSegment.rms is a raw amplitude value, not
                # decibels; dBFS is the RMS level in dB relative to full
                # scale, matching this key's name.
                "rms_volume_db": audio.dBFS
            }
        except Exception as e:
            logger.warning(f"元数据提取失败: {e}")
            return {
                "filename": filename,
                "format": self._get_file_extension(filename),
                "duration": 0.0,
                "file_size": os.path.getsize(audio_path) if os.path.exists(audio_path) else 0
            }
    
    async def _recognize_speech_with_whisper(
        self, 
        audio_path: str, 
        language: Optional[str] = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Run high-accuracy speech recognition with Whisper.

        Args:
            audio_path: Path of the (preprocessed) audio file.
            language: Language code (e.g. zh, en, ja, ko; None/"auto" means
                auto-detect).
            **kwargs: Per-call overrides for the Whisper options; anything
                not supplied falls back to the settings defaults.

        Returns:
            Dict with text, confidence, detected language, segments,
            language-detection info, confidence analysis and the raw Whisper
            result. Falls back to a degraded result on failure.
        """
        try:
            # Lazily load the model on first use.
            if self.model is None:
                self.model = self._load_model(self.model_name)
            
            # Map the requested language to a Whisper language code.
            whisper_language = self._convert_language_for_whisper(language)
            
            # Build the Whisper options (kwargs override settings defaults).
            transcribe_options = {
                "verbose": False,
                "word_timestamps": True,
                "temperature": kwargs.get("temperature", settings.WHISPER_TEMPERATURE),
                "best_of": kwargs.get("best_of", settings.WHISPER_BEST_OF),
                "beam_size": kwargs.get("beam_size", settings.WHISPER_BEAM_SIZE),
                "patience": kwargs.get("patience", settings.WHISPER_PATIENCE),
                "length_penalty": kwargs.get("length_penalty", settings.WHISPER_LENGTH_PENALTY),
                "suppress_tokens": kwargs.get("suppress_tokens", settings.WHISPER_SUPPRESS_TOKENS),
                "condition_on_previous_text": kwargs.get(
                    "condition_on_previous_text", 
                    settings.WHISPER_CONDITION_ON_PREVIOUS_TEXT
                ),
                "fp16": kwargs.get("fp16", settings.WHISPER_FP16),
                "compression_ratio_threshold": kwargs.get(
                    "compression_ratio_threshold", 
                    settings.WHISPER_COMPRESSION_RATIO_THRESHOLD
                ),
                "logprob_threshold": kwargs.get(
                    "logprob_threshold", 
                    settings.WHISPER_LOGPROB_THRESHOLD
                ),
                "no_speech_threshold": kwargs.get(
                    "no_speech_threshold", 
                    settings.WHISPER_NO_SPEECH_THRESHOLD
                )
            }
            
            # Only pass a language when not auto-detecting.
            if whisper_language and whisper_language != "auto":
                transcribe_options["language"] = whisper_language
            
            # Optional transcription prompt from settings.
            if settings.WHISPER_INITIAL_PROMPT:
                transcribe_options["initial_prompt"] = settings.WHISPER_INITIAL_PROMPT
            
            logger.debug(f"开始Whisper识别: {audio_path}")
            logger.debug(f"识别参数: {transcribe_options}")
            
            # Run the blocking transcription in the default thread executor
            # so the event loop stays responsive.
            result = await asyncio.get_event_loop().run_in_executor(
                None, 
                lambda: self.model.transcribe(audio_path, **transcribe_options)
            )
            
            # Unpack the transcription result.
            text = result["text"].strip()
            detected_language = result["language"]
            segments = result.get("segments", [])
            
            # Detailed confidence analysis over the segments.
            confidence_analysis = self._analyze_confidence(segments, result)
            
            # Language-detection summary for the response payload.
            language_detection = {
                "detected_language": detected_language,
                "requested_language": language,
                "whisper_language": whisper_language,
                "auto_detected": whisper_language is None or whisper_language == "auto"
            }
            
            # Fall back for empty text or very low overall confidence.
            if not text or confidence_analysis["avg_confidence"] < 0.1:
                fallback_result = await self._handle_low_quality_audio(
                    audio_path, detected_language, confidence_analysis
                )
                return {
                    **fallback_result,
                    "language_detection": language_detection,
                    "segments": segments,
                    "whisper_result": result
                }
            
            # Language-aware text cleanup.
            processed_text = self._post_process_text(text, detected_language)
            
            logger.info(
                f"Whisper识别成功: {len(processed_text)}字符, "
                f"语言: {detected_language}, "
                f"置信度: {confidence_analysis['avg_confidence']:.2f}"
            )
            
            return {
                "text": processed_text,
                "confidence": confidence_analysis["avg_confidence"],
                "language": detected_language,
                "segments": segments,
                "language_detection": language_detection,
                "confidence_analysis": confidence_analysis,
                "whisper_result": result
            }
            
        except Exception as e:
            logger.error(f"Whisper识别失败: {e}")
            # Return a degraded fallback result instead of raising.
            return await self._handle_recognition_failure(audio_path, language, str(e))
    
    def _convert_language_for_whisper(self, language: str) -> Optional[str]:
        """
        将标准语言代码转换为Whisper支持的语言代码
        
        Whisper支持的主要语言代码:
        zh: 中文, en: 英语, ja: 日语, ko: 韩语, fr: 法语, de: 德语, 
        es: 西班牙语, it: 意大利语, pt: 葡萄牙语, ru: 俄语等
        """
        if not language:
            return None
            
        # 语言代码映射
        language_mapping = {
            'zh-CN': 'zh',
            'zh-TW': 'zh', 
            'zh-HK': 'zh',
            'en-US': 'en',
            'en-GB': 'en',
            'en-AU': 'en',
            'ja-JP': 'ja',
            'ko-KR': 'ko',
            'fr-FR': 'fr',
            'de-DE': 'de',
            'es-ES': 'es',
            'it-IT': 'it',
            'pt-PT': 'pt',
            'pt-BR': 'pt',
            'ru-RU': 'ru',
            'ar-SA': 'ar',
            'hi-IN': 'hi',
            'th-TH': 'th',
            'vi-VN': 'vi'
        }
        
        # 直接匹配
        if language in language_mapping:
            return language_mapping[language]
        
        # 尝试匹配语言前缀
        lang_prefix = language.split('-')[0] if '-' in language else language
        
        # Whisper支持的语言列表（部分）
        supported_languages = [
            'zh', 'en', 'ja', 'ko', 'fr', 'de', 'es', 'it', 'pt', 'ru',
            'ar', 'hi', 'th', 'vi', 'nl', 'pl', 'sv', 'da', 'no', 'fi'
        ]
        
        if lang_prefix in supported_languages:
            return lang_prefix
            
        # 如果不支持，返回None让Whisper自动检测
        return None
    
    def _analyze_confidence(self, segments: list, result: dict) -> dict:
        """
        详细分析Whisper识别的置信度
        
        Args:
            segments: Whisper返回的segments列表
            result: 完整的Whisper结果
            
        Returns:
            详细的置信度分析结果
        """
        if not segments:
            return {
                "avg_confidence": 0.5,
                "min_confidence": 0.5,
                "max_confidence": 0.5,
                "confidence_variance": 0.0,
                "low_confidence_segments": 0,
                "total_segments": 0,
                "quality_score": "medium"
            }
        
        confidences = []
        total_confidence = 0.0
        total_duration = 0.0
        low_confidence_count = 0
        
        for segment in segments:
            # 计算segment置信度
            avg_logprob = segment.get('avg_logprob', -1.0)
            no_speech_prob = segment.get('no_speech_prob', 0.5)
            
            # 综合计算置信度
            logprob_confidence = max(0.0, min(1.0, (avg_logprob + 1.0)))
            speech_confidence = 1.0 - no_speech_prob
            segment_confidence = (logprob_confidence + speech_confidence) / 2.0
            
            confidences.append(segment_confidence)
            
            # 加权平均
            duration = segment.get('end', 0) - segment.get('start', 0)
            if duration > 0:
                total_confidence += segment_confidence * duration
                total_duration += duration
            
            # 统计低置信度片段
            if segment_confidence < 0.6:
                low_confidence_count += 1
        
        # 计算统计指标
        avg_confidence = total_confidence / total_duration if total_duration > 0 else 0.5
        min_confidence = min(confidences) if confidences else 0.5
        max_confidence = max(confidences) if confidences else 0.5
        
        # 计算方差
        if len(confidences) > 1:
            variance = sum((c - avg_confidence) ** 2 for c in confidences) / len(confidences)
        else:
            variance = 0.0
        
        # 质量评估
        if avg_confidence >= 0.8 and low_confidence_count / len(segments) < 0.2:
            quality_score = "high"
        elif avg_confidence >= 0.6 and low_confidence_count / len(segments) < 0.4:
            quality_score = "medium"
        else:
            quality_score = "low"
        
        return {
            "avg_confidence": avg_confidence,
            "min_confidence": min_confidence,
            "max_confidence": max_confidence,
            "confidence_variance": variance,
            "low_confidence_segments": low_confidence_count,
            "total_segments": len(segments),
            "quality_score": quality_score,
            "segment_confidences": confidences
        }
    
    def _calculate_confidence(self, segments: list) -> float:
        """
        向后兼容的置信度计算方法
        
        Args:
            segments: Whisper返回的segments列表
            
        Returns:
            平均置信度 (0.0-1.0)
        """
        analysis = self._analyze_confidence(segments, {})
        return analysis["avg_confidence"]
    
    def _simple_audio_analysis(self, audio_path: str, language: str) -> Dict[str, Any]:
        """
        基于音频特征的简单分析，当无法识别语音时返回空结果
        """
        try:
            # 获取音频基本信息
            duration = self._get_audio_duration(audio_path)
            
            return {
                "text": "",
                "confidence": 0.0,
                "language": language or "unknown",
                "segments": []
            }
            
        except Exception as e:
            logger.warning(f"音频分析失败: {e}")
            return {
                "text": "",
                "confidence": 0.0,
                "language": language or "unknown",
                "segments": []
            }
    
    def _get_audio_duration(self, audio_path: str) -> float:
        """
        Return the duration of an audio file in seconds.

        Best-effort: any decoding/IO failure yields 0.0 rather than raising.
        """
        try:
            segment = AudioSegment.from_file(audio_path)
        except Exception:
            return 0.0
        # pydub reports length in milliseconds; convert to seconds.
        return len(segment) / 1000.0
    
    def _get_file_extension(self, filename: str) -> str:
        """
        从文件名获取扩展名
        """
        return filename.split(".")[-1].lower() if "." in filename else ""
    
    def _post_process_text(self, text: str, language: str) -> str:
        """
        对识别出的文本进行后处理优化
        
        Args:
            text: 原始识别文本
            language: 检测到的语言
            
        Returns:
            后处理后的文本
        """
        if not text:
            return text
        
        # 基本清理
        processed_text = text.strip()
        
        # 中文文本处理
        if language and language.startswith('zh'):
            # 移除多余的空格
            processed_text = ''.join(processed_text.split())
            # 标点符号标准化
            processed_text = processed_text.replace('，，', '，')
            processed_text = processed_text.replace('。。', '。')
            processed_text = processed_text.replace('？？', '？')
            processed_text = processed_text.replace('！！', '！')
        
        # 英文文本处理
        elif language and language.startswith('en'):
            # 标准化空格
            import re
            processed_text = re.sub(r'\s+', ' ', processed_text)
            # 修复标点符号前的空格
            processed_text = re.sub(r'\s+([,.!?;:])', r'\1', processed_text)
            # 修复引号
            processed_text = re.sub(r'\s+"', ' "', processed_text)
            processed_text = re.sub(r'"\s+', '" ', processed_text)
        
        return processed_text
    
    async def _handle_low_quality_audio(self, audio_path: str, language: str, confidence_analysis: dict) -> dict:
        """
        Degradation strategy for low-quality audio.

        Re-runs transcription with more conservative decoding options; if
        that still yields no usable text, falls back to the empty result
        from _simple_audio_analysis.

        Args:
            audio_path: Path to the audio file.
            language: Detected language code.
            confidence_analysis: Result of the confidence analysis step.

        Returns:
            A transcription-result dict; "fallback_used" is True when the
            conservative re-run produced text.
        """
        logger.warning(f"检测到低质量音频，置信度: {confidence_analysis.get('avg_confidence', 0):.2f}")

        # Try a re-run with more conservative parameters than the primary pass.
        try:
            conservative_options = {
                "temperature": 0.0,  # deterministic decoding
                "best_of": 3,
                "beam_size": 3,
                "patience": 2.0,
                "condition_on_previous_text": False,
                "compression_ratio_threshold": 1.8,  # more lenient
                "logprob_threshold": -1.5,  # more lenient
                "no_speech_threshold": 0.8   # stricter no-speech gate
            }

            # get_running_loop() is the correct call inside a coroutine;
            # asyncio.get_event_loop() here is deprecated since Python 3.10.
            result = await asyncio.get_running_loop().run_in_executor(
                None,
                lambda: self.model.transcribe(audio_path, **conservative_options)
            )

            text = result["text"].strip()
            if text and len(text) > 5:  # require a minimum amount of text
                return {
                    "text": self._post_process_text(text, language),
                    "confidence": 0.3,  # explicitly marked low-confidence
                    "language": result["language"],
                    "segments": result.get("segments", []),
                    "fallback_used": True
                }

        except Exception as e:
            logger.error(f"降级处理也失败: {e}")

        # Final fallback: an empty descriptive result.
        return self._simple_audio_analysis(audio_path, language)
    
    async def _handle_recognition_failure(self, audio_path: str, language: str, error_msg: str) -> dict:
        """
        Last-resort handler for a total recognition failure.

        Args:
            audio_path: Path to the audio file.
            language: Requested language code; may be empty.
            error_msg: Message describing the failure.

        Returns:
            A basic-analysis result, or a hard-coded empty result carrying
            the error message if even that analysis raises.
        """
        logger.error(f"语音识别完全失败: {error_msg}")

        try:
            # Attempt the simple audio analysis as a first fallback.
            return self._simple_audio_analysis(audio_path, language)
        except Exception:
            # Absolute fallback: an empty result annotated with the error.
            return {
                "text": "",
                "confidence": 0.0,
                "language": language or "unknown",
                "segments": [],
                "error": error_msg
            }
    
    def _cleanup_temp_files(self, *file_paths: str):
        """
        Best-effort removal of temporary files.

        Missing paths are skipped silently; removal failures are logged as
        warnings and never raised to the caller.
        """
        for path in file_paths:
            if not os.path.exists(path):
                continue
            try:
                os.remove(path)
            except Exception as e:
                logger.warning(f"清理临时文件失败: {path} - {e}")
            else:
                logger.debug(f"已清理临时文件: {path}")

# Module-level singleton: shared AudioProcessor instance imported by the
# rest of the application (triggers device detection at import time).
audio_processor = AudioProcessor()