"""
音频处理路由
基于DashScope实现语音转文字功能
"""

import asyncio
import json
import os
import tempfile

import dashscope
from dotenv import load_dotenv
from fastapi import APIRouter, WebSocket, WebSocketDisconnect, HTTPException, UploadFile, File, Form

from audio.dashscope.ASRAudio import audio_manager
# Import the TTS manager
from audio.dashscope.TTSAudio import tts_manager
from config.logging_config import get_logger
import base64
# Load environment variables
load_dotenv()

# Configure logging
logger = get_logger(__name__)

router = APIRouter()


@router.websocket("/ws/recognition/{session_id}")
async def websocket_audio_recognition(websocket: WebSocket, session_id: str):
    """WebSocket音频识别端点 - 后端直接处理麦克风"""
    if not dashscope.api_key:
        await websocket.close(code=1008, reason="DashScope API Key 未配置")
        return

    await websocket.accept()

    try:
        # 使用默认配置
        default_config = {
            'model': 'paraformer-realtime-v2',
            'format': 'pcm',
            'sample_rate': 16000,  # 使用16000Hz采样率
            'language_hints': ['zh'],
            'channels': 1
        }

        # 设置主事件循环
        loop = asyncio.get_running_loop()
        audio_manager.set_main_loop(loop)

        # 创建识别会话
        await audio_manager.create_session(session_id, websocket, default_config)

        # 发送初始化完成消息
        await websocket.send_text(json.dumps({
            'type': 'initialized',
            'session_id': session_id,
            'message': '音频识别会话已初始化，等待开始录音指令',
            'config': default_config
        }, ensure_ascii=False))

        # 处理控制消息（不再处理音频数据）
        while True:
            try:
                # 只接收文本控制消息
                message = await websocket.receive_text()

                try:
                    control_msg = json.loads(message)
                    message_type = control_msg.get('type')

                    if message_type == 'start_recording':
                        # 开始录音 - 后端处理麦克风
                        await audio_manager.start_recording(session_id)

                    elif message_type == 'stop_recording':
                        # 停止录音
                        await audio_manager.stop_recording(session_id)

                    elif message_type == 'stop':
                        # 完全停止会话
                        break

                    else:
                        logger.warning(f"未知的控制消息类型: {message_type}")

                except json.JSONDecodeError:
                    logger.warning(f"无法解析控制消息: {message}")
                    await websocket.send_text(json.dumps({
                        'type': 'error',
                        'message': '无法解析控制消息'
                    }, ensure_ascii=False))

            except WebSocketDisconnect:
                break
            except Exception as e:
                logger.error(f"处理WebSocket消息失败: {e}")
                await websocket.send_text(json.dumps({
                    'type': 'error',
                    'message': f'处理消息失败: {str(e)}'
                }, ensure_ascii=False))
                break

    except WebSocketDisconnect:
        logger.info(f"WebSocket连接断开: {session_id}")
    except Exception as e:
        logger.error(f"WebSocket音频识别异常: {e}")
    finally:
        await audio_manager.close_session(session_id)


@router.post("/recognize/file")
async def recognize_audio_file(
        file: UploadFile = File(..., description="音频文件"),
        model: str = Form("paraformer-realtime-v2", description="识别模型"),
        language: str = Form("zh", description="语言提示")
):
    """音频文件识别接口"""
    if not dashscope.api_key:
        raise HTTPException(status_code=500, detail="DashScope API Key 未配置")

    # 验证文件类型
    allowed_extensions = ['.wav', '.mp3', '.m4a', '.flac', '.aac', '.opus']
    file_extension = os.path.splitext(file.filename)[1].lower()
    if file_extension not in allowed_extensions:
        raise HTTPException(
            status_code=400,
            detail=f"不支持的文件类型: {file_extension}，支持的类型: {', '.join(allowed_extensions)}"
        )

    # 检查文件大小 (限制为50MB)
    max_size = 50 * 1024 * 1024  # 50MB
    file_content = await file.read()
    if len(file_content) > max_size:
        raise HTTPException(status_code=400, detail=f"文件过大，最大支持 {max_size // (1024 * 1024)}MB")

    # 保存临时文件
    with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as temp_file:
        temp_file.write(file_content)
        temp_file_path = temp_file.name

    try:
        # 使用DashScope进行音频识别
        from dashscope.audio.asr import Transcription

        response = Transcription.call(
            model=model,
            file_urls=[temp_file_path],
            language_hints=[language]
        )

        if response.status_code == 200:
            result = response.output
            return {
                'success': True,
                'text': result.get('text', ''),
                'confidence': result.get('confidence', 0),
                'usage': response.usage,
                'request_id': response.request_id
            }
        else:
            raise HTTPException(
                status_code=500,
                detail=f"识别失败: {response.message}"
            )

    except Exception as e:
        logger.error(f"音频文件识别失败: {e}")
        raise HTTPException(status_code=500, detail=f"识别失败: {str(e)}")
    finally:
        # 清理临时文件
        try:
            os.unlink(temp_file_path)
        except Exception as e:
            logger.warning(f"清理临时文件失败: {e}")


@router.get("/sessions")
async def get_active_sessions():
    """获取活跃的音频识别会话"""
    sessions = []
    for session_id, session_data in audio_manager.active_sessions.items():
        if session_data['is_active']:
            sessions.append({
                'session_id': session_id,
                'config': session_data['config'],
                'results_count': len(session_data['recognition_results'])
            })

    return {
        'success': True,
        'sessions': sessions,
        'total': len(sessions)
    }


@router.get("/sessions/{session_id}/results")
async def get_session_results(session_id: str):
    """获取指定会话的识别结果"""
    if session_id not in audio_manager.active_sessions:
        raise HTTPException(status_code=404, detail="会话不存在")

    session_data = audio_manager.active_sessions[session_id]
    return {
        'success': True,
        'session_id': session_id,
        'results': session_data['recognition_results'],
        'total': len(session_data['recognition_results'])
    }


@router.delete("/sessions/{session_id}")
async def close_recognition_session(session_id: str):
    """关闭指定的音频识别会话"""
    if session_id not in audio_manager.active_sessions:
        raise HTTPException(status_code=404, detail="会话不存在")

    await audio_manager.close_session(session_id)

    return {
        'success': True,
        'message': f'会话 {session_id} 已关闭'
    }


@router.get("/health")
async def audio_service_health():
    """音频服务健康检查"""
    return {
        'success': True,
        'service': 'audio',
        'dashscope_configured': bool(dashscope.api_key),
        'active_sessions': len(audio_manager.active_sessions),
        'version': '1.0.0'
    }


# ====== TTS speech synthesis endpoints ======

@router.post("/tts/synthesize")
async def synthesize_text_to_speech(
        text: str = Form(..., description="要合成的文本内容"),
        model: str = Form("sambert-zhichu-v1", description="TTS模型"),
        sample_rate: int = Form(48000, description="采样率"),
        format: str = Form("wav", description="音频格式"),
        volume: int = Form(50, description="音量 (0-100)"),
        speech_rate: float = Form(1.0, description="语速 (0.5-2.0)"),
        pitch_rate: float = Form(1.0, description="音调 (0.5-2.0)")
):
    """
    同步语音合成接口
    将文本转换为语音文件
    """
    if not dashscope.api_key:
        raise HTTPException(status_code=500, detail="DashScope API Key 未配置")

    # 验证参数
    if not text.strip():
        raise HTTPException(status_code=400, detail="文本内容不能为空")

    if len(text) > 500:
        raise HTTPException(status_code=400, detail="文本长度不能超过500字符，请使用流式接口处理长文本")

    if volume < 0 or volume > 100:
        raise HTTPException(status_code=400, detail="音量必须在0-100之间")

    if speech_rate < 0.5 or speech_rate > 2.0:
        raise HTTPException(status_code=400, detail="语速必须在0.5-2.0之间")

    if pitch_rate < 0.5 or pitch_rate > 2.0:
        raise HTTPException(status_code=400, detail="音调必须在0.5-2.0之间")

    try:
        # 构建TTS配置
        tts_config = {
            'model': model,
            'sample_rate': sample_rate,
            'format': format,
            'volume': volume,
            'speech_rate': speech_rate,
            'pitch_rate': pitch_rate
        }

        # 调用TTS合成
        audio_data = await tts_manager.synthesize_async(text, tts_config)

        if audio_data is None:
            raise HTTPException(status_code=500, detail="语音合成失败")

        # 返回音频数据
        from fastapi.responses import Response

        # 设置正确的MIME类型
        media_type = "audio/wav" if format == "wav" else f"audio/{format}"

        return Response(
            content=audio_data,
            media_type=media_type,
            headers={
                "Content-Disposition": f"attachment; filename=tts_output.{format}",
                "Content-Length": str(len(audio_data))
            }
        )

    except Exception as e:
        logger.error(f"TTS合成失败: {e}")
        raise HTTPException(status_code=500, detail=f"语音合成失败: {str(e)}")


@router.get("/tts/models")
async def get_tts_models():
    """获取可用的TTS模型列表"""
    try:
        models = tts_manager.get_available_models()
        return {
            'success': True,
            'models': models,
            'total': len(models)
        }
    except Exception as e:
        logger.error(f"获取TTS模型列表失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取模型列表失败: {str(e)}")


@router.post("/tts/synthesize/base64")
async def synthesize_text_to_base64(
        text: str = Form(..., description="要合成的文本内容"),
        model: str = Form("sambert-zhichu-v1", description="TTS模型"),
        sample_rate: int = Form(48000, description="采样率"),
        format: str = Form("wav", description="音频格式"),
        volume: int = Form(50, description="音量 (0-100)"),
        speech_rate: float = Form(1.0, description="语速 (0.5-2.0)"),
        pitch_rate: float = Form(1.0, description="音调 (0.5-2.0)")
):
    """
    TTS合成返回Base64编码的音频数据
    适合前端直接播放
    """
    if not dashscope.api_key:
        raise HTTPException(status_code=500, detail="DashScope API Key 未配置")

    # 验证参数
    if not text.strip():
        raise HTTPException(status_code=400, detail="文本内容不能为空")

    if len(text) > 500:
        raise HTTPException(status_code=400, detail="文本长度不能超过500字符")

    try:
        # 构建TTS配置
        tts_config = {
            'model': model,
            'sample_rate': sample_rate,
            'format': format,
            'volume': volume,
            'speech_rate': speech_rate,
            'pitch_rate': pitch_rate
        }

        # 调用TTS合成
        audio_data = await tts_manager.synthesize_async(text, tts_config)

        if audio_data is None:
            raise HTTPException(status_code=500, detail="语音合成失败")

        # 转换为Base64

        audio_base64 = base64.b64encode(audio_data).decode('utf-8')

        # 构建数据URL
        media_type = "audio/wav" if format == "wav" else f"audio/{format}"
        data_url = f"data:{media_type};base64,{audio_base64}"

        return {
            'success': True,
            'audio_data': audio_base64,
            'data_url': data_url,
            'format': format,
            'sample_rate': sample_rate,
            'size': len(audio_data),
            'text': text
        }

    except Exception as e:
        logger.error(f"TTS合成失败: {e}")
        raise HTTPException(status_code=500, detail=f"语音合成失败: {str(e)}")


@router.get("/tts/health")
async def tts_service_health():
    """TTS服务健康检查"""
    return {
        'success': True,
        'service': 'tts',
        'dashscope_configured': bool(dashscope.api_key),
        'available_models': len(tts_manager.get_available_models()),
        'version': '1.0.0'
    }
