# Audio-to-text API routes

from fastapi import APIRouter, Depends, UploadFile, File, Form, HTTPException, Header, Query
from typing import Optional, Dict, Any, List
import logging

from ..core.audio_processor import audio_processor
from ..core.config import settings
from ..core.monitoring import performance_monitor
from ..models.schemas import AudioToTextResponse, ErrorResponse

# Module-level logger named after this module's import path.
logger = logging.getLogger(__name__)

# Router collecting the endpoints below; presumably included by the app elsewhere — not shown here.
router = APIRouter()

# Simple API-key verification dependency
async def verify_api_key(api_key: str = Header(None, alias=settings.API_KEY_HEADER)):
    """
    Validate the API key supplied in the configured request header.

    Validation is skipped entirely when no key is configured
    (``settings.API_KEY`` is ``None`` or empty), so keyless deployments
    remain open.

    Raises:
        HTTPException: 401 when the header is missing, 403 when the
            supplied key does not match the configured one.

    Returns:
        bool: always ``True`` when the request is allowed through.
    """
    # Truthiness covers both None and "" — the former double check
    # (API_KEY and API_KEY != "") was redundant.
    if settings.API_KEY:
        if not api_key:
            raise HTTPException(
                status_code=401,
                detail="缺少API密钥"
            )
        if api_key != settings.API_KEY:
            raise HTTPException(
                status_code=403,
                detail="API密钥无效"
            )
    return True

@router.post(
    "/audio-to-text",
    response_model=AudioToTextResponse,
    responses={
        400: {"model": ErrorResponse},
        401: {"model": ErrorResponse},
        403: {"model": ErrorResponse},
        500: {"model": ErrorResponse},
    },
    summary="音频转文字",
    description="上传音频文件并转换为文字，支持多种Whisper模型和高级参数",
    tags=["音频处理"],
)
async def convert_audio_to_text(
    file: UploadFile = File(..., description="音频文件"),
    language: Optional[str] = Form(None, description=f"语言代码，支持: {', '.join(settings.SUPPORTED_LANGUAGES)}"),
    model: Optional[str] = Form(None, description="Whisper模型名称，如不指定则使用默认模型"),
    temperature: Optional[float] = Form(None, description="温度参数 (0.0-1.0)，控制输出随机性"),
    best_of: Optional[int] = Form(None, description="候选数量 (1-5)，更多候选可能提高质量"),
    beam_size: Optional[int] = Form(None, description="束搜索大小，影响解码质量"),
    patience: Optional[float] = Form(None, description="耐心参数，控制束搜索的探索程度"),
    include_segments: bool = Form(False, description="是否包含详细的时间段信息"),
    include_confidence_analysis: bool = Form(False, description="是否包含详细的置信度分析"),
    preprocess_audio: bool = Form(True, description="是否对音频进行预处理优化"),
    _: bool = Depends(verify_api_key)
) -> Dict[str, Any]:
    """
    Transcribe an uploaded audio file to text.

    Validates the language code, model name and file size, then delegates
    to ``audio_processor.process_audio_file`` and assembles the response.
    Optional extras (segments, confidence analysis) are included only when
    requested and present in the processor's result.

    Raises:
        HTTPException: 400 for invalid language/model/oversized file,
            500 for any unexpected processing failure.
    """
    try:
        # Reject language codes outside the configured whitelist.
        if language and language not in settings.SUPPORTED_LANGUAGES:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的语言代码: {language}。支持的语言: {', '.join(settings.SUPPORTED_LANGUAGES)}"
            )

        # Reject model names the processor does not know about.
        if model:
            available_models = audio_processor.get_available_models()
            if model not in available_models:
                raise HTTPException(
                    status_code=400,
                    detail=f"不支持的模型: {model}。可用模型: {', '.join(available_models.keys())}"
                )

        # Determine upload size by seeking to the end of the underlying
        # file object, then rewind so downstream reads start from zero.
        # (The former `file_size_mb = 0` initializer was dead code.)
        file.file.seek(0, 2)
        file_size_mb = file.file.tell() / (1024 * 1024)
        file.file.seek(0)

        if file_size_mb > settings.MAX_AUDIO_SIZE_MB:
            raise HTTPException(
                status_code=400,
                detail=f"文件太大。最大允许大小: {settings.MAX_AUDIO_SIZE_MB}MB"
            )

        logger.info(f"开始处理音频文件: {file.filename}, 大小: {file_size_mb:.2f}MB")

        # Processing options forwarded to the processor as-is.
        process_options = {
            "model": model,
            "preprocess_audio": preprocess_audio
        }

        # Only forward Whisper tuning parameters the caller actually set.
        whisper_params = {
            name: value
            for name, value in (
                ("temperature", temperature),
                ("best_of", best_of),
                ("beam_size", beam_size),
                ("patience", patience),
            )
            if value is not None
        }

        # Run the transcription.
        result = await audio_processor.process_audio_file(
            file,
            language,
            **process_options,
            **whisper_params
        )

        # Mandatory response fields.
        response = {
            "text": result["text"],
            "confidence": result["confidence"],
            "metadata": result["metadata"]
        }

        # Optional detail sections, included only when requested AND produced.
        if include_segments and "segments" in result:
            response["segments"] = result["segments"]

        if include_confidence_analysis and "confidence_analysis" in result:
            response["confidence_analysis"] = result["confidence_analysis"]

        if "language_detection" in result:
            response["language_detection"] = result["language_detection"]

        logger.info(f"音频处理完成: {file.filename}, 文本长度: {len(result['text'])}")

        return response

    except HTTPException:
        raise
    except Exception as e:
        # logger.exception records the full traceback automatically,
        # replacing the former manual traceback.format_exc() pair.
        logger.exception(f"音频处理失败: {file.filename if file else 'unknown'} - {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"音频处理失败: {str(e)}"
        )

@router.get(
    "/supported-formats",
    summary="获取支持的音频格式",
    description="返回服务支持的音频格式列表",
    tags=["系统信息"],
)
async def get_supported_formats():
    """Return the audio formats this service accepts, straight from settings."""
    payload = {"formats": settings.SUPPORTED_AUDIO_FORMATS}
    return payload

@router.get(
    "/supported-languages",
    summary="获取支持的语言",
    description="返回服务支持的语言列表",
    tags=["系统信息"],
)
async def get_supported_languages():
    """Return the supported language codes together with the configured default."""
    return dict(
        languages=settings.SUPPORTED_LANGUAGES,
        default=settings.DEFAULT_LANGUAGE,
    )

@router.post(
    "/audio-to-text/batch",
    summary="批量音频转文字",
    description="批量处理多个音频文件，支持并发处理",
    tags=["音频处理"],
)
async def convert_audio_batch(
    files: List[UploadFile] = File(..., description="音频文件列表（最多10个）"),
    language: Optional[str] = Form(None, description=f"语言代码，支持: {', '.join(settings.SUPPORTED_LANGUAGES)}"),
    model: Optional[str] = Form(None, description="Whisper模型名称"),
    include_segments: bool = Form(False, description="是否包含详细的时间段信息"),
    _: bool = Depends(verify_api_key)
) -> Dict[str, Any]:
    """
    Transcribe a batch of uploaded audio files.

    Validates batch size (1-10 files), language code and model name,
    delegates to ``audio_processor.process_audio_batch``, and builds a
    per-file result list with overall success/failure counts.
    """
    try:
        # Bounds on the batch size: at most 10 files, at least one.
        if len(files) > 10:
            raise HTTPException(
                status_code=400,
                detail="批量处理最多支持10个文件"
            )

        if not files:
            raise HTTPException(
                status_code=400,
                detail="请至少上传一个音频文件"
            )

        # Language, when given, must be a configured code.
        if language and language not in settings.SUPPORTED_LANGUAGES:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的语言代码: {language}。支持的语言: {', '.join(settings.SUPPORTED_LANGUAGES)}"
            )

        # Model, when given, must be known to the processor.
        if model:
            known_models = audio_processor.get_available_models()
            if model not in known_models:
                raise HTTPException(
                    status_code=400,
                    detail=f"不支持的模型: {model}。可用模型: {', '.join(known_models.keys())}"
                )

        logger.info(f"开始批量处理 {len(files)} 个音频文件")

        # Delegate the actual batch transcription.
        outcomes = await audio_processor.process_audio_batch(
            files,
            language=language,
            model=model
        )

        # Single pass to count failures; successes are the remainder.
        failure_count = sum(1 for item in outcomes if "error" in item)
        response = {
            "total_files": len(files),
            "successful": len(outcomes) - failure_count,
            "failed": failure_count,
            "results": []
        }

        for idx, outcome in enumerate(outcomes):
            entry = {
                "file_index": idx,
                "filename": files[idx].filename,
                "text": outcome.get("text", ""),
                "confidence": outcome.get("confidence", 0.0),
                "metadata": outcome.get("metadata", {})
            }

            if "error" in outcome:
                entry["error"] = outcome["error"]

            if include_segments and "segments" in outcome:
                entry["segments"] = outcome["segments"]

            response["results"].append(entry)

        logger.info(f"批量处理完成: {response['successful']}/{response['total_files']} 成功")

        return response

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"批量处理失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"批量处理失败: {str(e)}"
        )

@router.get(
    "/models",
    summary="获取可用的Whisper模型",
    description="返回所有可用的Whisper模型及其详细信息",
    tags=["系统信息"],
)
async def get_available_models():
    """Return every Whisper model the processor exposes, plus the active one."""
    try:
        model_catalog = audio_processor.get_available_models()
        # Fall back to the configured default if the processor does not
        # track a current model name.
        active = getattr(audio_processor, 'current_model_name', settings.WHISPER_MODEL)
        return {
            "current_model": active,
            "available_models": model_catalog,
            "total_models": len(model_catalog)
        }
    except Exception as e:
        logger.error(f"获取模型列表失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"获取模型列表失败: {str(e)}"
        )

@router.post(
    "/models/switch",
    summary="切换Whisper模型",
    description="动态切换当前使用的Whisper模型",
    tags=["系统管理"],
)
async def switch_model(
    model_name: str = Form(..., description="要切换到的模型名称"),
    _: bool = Depends(verify_api_key)
):
    """
    Switch the processor to another Whisper model at runtime.

    Unknown model names are rejected with 400; a switch the processor
    reports as failed becomes a 500.
    """
    try:
        catalog = audio_processor.get_available_models()
        if model_name not in catalog:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的模型: {model_name}。可用模型: {', '.join(catalog.keys())}"
            )

        previous = getattr(audio_processor, 'current_model_name', 'unknown')
        logger.info(f"切换模型: {previous} -> {model_name}")

        # Guard clause: success returns immediately, failure falls through.
        if await audio_processor.switch_model(model_name):
            return {
                "success": True,
                "message": f"成功切换到模型: {model_name}",
                "current_model": model_name,
                "model_info": catalog[model_name]
            }

        raise HTTPException(
            status_code=500,
            detail=f"切换模型失败: {model_name}"
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"切换模型失败: {model_name} - {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"切换模型失败: {str(e)}"
        )

@router.get(
    "/system/status",
    summary="获取系统状态",
    description="返回系统运行状态和配置信息",
    tags=["系统信息"],
)
async def get_system_status():
    """
    Report runtime status: compute device, host system and service config.

    Never raises — any failure is reported as a ``status: "error"`` payload.
    """
    try:
        import torch
        import psutil
        import platform

        # Query CUDA availability once and reuse it for every field.
        cuda_ok = torch.cuda.is_available()
        device_info = {
            "current_device": str(getattr(audio_processor, 'device', 'unknown')),
            "cuda_available": cuda_ok,
            "cuda_device_count": torch.cuda.device_count() if cuda_ok else 0
        }

        if cuda_ok:
            device_info["cuda_device_name"] = torch.cuda.get_device_name(0)
            device_info["cuda_memory_total"] = torch.cuda.get_device_properties(0).total_memory

        # Single snapshot of virtual memory for both totals.
        mem = psutil.virtual_memory()
        system_info = {
            "platform": platform.platform(),
            "python_version": platform.python_version(),
            "cpu_count": psutil.cpu_count(),
            "memory_total": mem.total,
            "memory_available": mem.available
        }

        config_info = {
            "max_audio_size_mb": settings.MAX_AUDIO_SIZE_MB,
            "supported_formats": settings.SUPPORTED_AUDIO_FORMATS,
            "supported_languages": len(settings.SUPPORTED_LANGUAGES),
            "default_model": settings.WHISPER_MODEL,
            "whisper_device": settings.WHISPER_DEVICE
        }

        return {
            "status": "running",
            "device_info": device_info,
            "system_info": system_info,
            "config_info": config_info,
            "current_model": getattr(audio_processor, 'current_model_name', settings.WHISPER_MODEL)
        }

    except Exception as e:
        logger.error(f"获取系统状态失败: {str(e)}")
        return {
            "status": "error",
            "error": str(e)
        }

# ==================== Monitoring API endpoints ====================

@router.get(
    "/monitoring/statistics",
    summary="获取监控统计信息",
    description="返回详细的性能监控统计数据，包括任务统计、性能指标、错误统计等",
    tags=["监控"],
)
async def get_monitoring_statistics(
    _: bool = Depends(verify_api_key)
):
    """Return the monitor's aggregate statistics (or a disabled notice)."""
    try:
        # Short-circuit when monitoring is switched off in settings.
        if not settings.MONITORING_ENABLED:
            return {
                "monitoring_enabled": False,
                "message": "监控功能未启用"
            }

        stats = performance_monitor.get_statistics()
        return {
            "monitoring_enabled": True,
            "statistics": stats
        }

    except Exception as e:
        logger.error(f"获取监控统计失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"获取监控统计失败: {str(e)}"
        )

@router.get(
    "/monitoring/health",
    summary="获取系统健康状态",
    description="返回系统健康检查结果，包括CPU、内存使用率和错误率等关键指标",
    tags=["监控"],
)
async def get_health_status():
    """Return the monitor's health check; failures become an error payload, not a 500."""
    try:
        if not settings.MONITORING_ENABLED:
            return {
                "status": "unknown",
                "monitoring_enabled": False,
                "message": "监控功能未启用，无法获取健康状态"
            }

        return performance_monitor.get_health_status()

    except Exception as e:
        logger.error(f"获取健康状态失败: {str(e)}")
        return {
            "status": "error",
            "error": str(e),
            "monitoring_enabled": settings.MONITORING_ENABLED
        }

@router.get(
    "/monitoring/metrics",
    summary="获取实时监控指标",
    description="返回当前的系统性能指标，包括CPU、内存、GPU使用情况等",
    tags=["监控"],
)
async def get_monitoring_metrics(
    _: bool = Depends(verify_api_key)
):
    """Return a snapshot of the latest system metrics, active tasks, counters and gauges."""
    try:
        if not settings.MONITORING_ENABLED:
            return {
                "monitoring_enabled": False,
                "message": "监控功能未启用"
            }

        # Newest system-metrics sample, if any have been collected yet.
        history = performance_monitor.system_metrics
        snapshot = history[-1].to_dict() if history else None

        # Tasks currently in flight.
        running = [task.to_dict() for task in performance_monitor.tasks.values()]

        return {
            "monitoring_enabled": True,
            "timestamp": snapshot["timestamp"] if snapshot else None,
            "system_metrics": snapshot,
            "active_tasks": running,
            "counters": dict(performance_monitor.counters),
            "gauges": dict(performance_monitor.gauges),
            "active_task_count": len(running)
        }

    except Exception as e:
        logger.error(f"获取监控指标失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"获取监控指标失败: {str(e)}"
        )

@router.get(
    "/monitoring/tasks",
    summary="获取任务监控信息",
    description="返回当前活跃任务和最近完成任务的详细信息",
    tags=["监控"],
)
async def get_task_monitoring(
    limit: int = Query(50, description="返回的最近任务数量限制"),
    status: Optional[str] = Query(None, description="过滤任务状态 (completed, failed, processing)"),
    _: bool = Depends(verify_api_key)
):
    """Return active tasks and recently finished tasks, optionally filtered and limited."""
    try:
        if not settings.MONITORING_ENABLED:
            return {
                "monitoring_enabled": False,
                "message": "监控功能未启用"
            }

        running = [task.to_dict() for task in performance_monitor.tasks.values()]
        finished = [task.to_dict() for task in list(performance_monitor.completed_tasks)]

        # Optional status filter, then keep only the most recent entries.
        if status:
            finished = [task for task in finished if task["status"] == status]
        if limit > 0:
            finished = finished[-limit:]

        return {
            "monitoring_enabled": True,
            "active_tasks": running,
            "completed_tasks": finished,
            "active_count": len(running),
            "completed_count": len(finished)
        }

    except Exception as e:
        logger.error(f"获取任务监控失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"获取任务监控失败: {str(e)}"
        )

@router.post(
    "/monitoring/export",
    summary="导出监控数据",
    description="将当前的监控数据导出到JSON文件",
    tags=["监控"],
)
async def export_monitoring_data(
    _: bool = Depends(verify_api_key)
):
    """Dump the monitor's current data to a timestamped JSON file next to the configured export path."""
    try:
        if not settings.MONITORING_ENABLED:
            return {
                "success": False,
                "monitoring_enabled": False,
                "message": "监控功能未启用"
            }

        import os
        from datetime import datetime

        # Timestamped file name placed in the export path's directory.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        export_name = f"monitoring_export_{stamp}.json"
        target = os.path.join(os.path.dirname(settings.METRICS_EXPORT_PATH), export_name)

        performance_monitor.export_metrics(target)

        return {
            "success": True,
            "export_path": target,
            "export_filename": export_name,
            "timestamp": stamp
        }

    except Exception as e:
        logger.error(f"导出监控数据失败: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"导出监控数据失败: {str(e)}"
        )

@router.get(
    "/monitoring/config",
    summary="获取监控配置",
    description="返回当前的监控配置信息",
    tags=["监控"],
)
async def get_monitoring_config():
    """Expose the active monitoring configuration values from settings."""
    thresholds = {
        "cpu_threshold": settings.HEALTH_CHECK_CPU_THRESHOLD,
        "memory_threshold": settings.HEALTH_CHECK_MEMORY_THRESHOLD,
        "error_rate_threshold": settings.HEALTH_CHECK_ERROR_RATE_THRESHOLD
    }
    return {
        "monitoring_enabled": settings.MONITORING_ENABLED,
        "metrics_retention_hours": settings.METRICS_RETENTION_HOURS,
        "system_metrics_interval": settings.SYSTEM_METRICS_INTERVAL,
        "max_history_size": settings.MAX_HISTORY_SIZE,
        "health_check_thresholds": thresholds,
        "metrics_export_path": settings.METRICS_EXPORT_PATH
    }