# any4dh FastAPI 服务器
from fastapi import FastAPI, File, UploadFile, Form, HTTPException, Request, BackgroundTasks, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse, FileResponse, StreamingResponse, HTMLResponse
from core.tts.file import file_response_with_cleanup
from fastapi.middleware.cors import CORSMiddleware
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
import base64
import json
import re
import numpy as np
from threading import Thread, Event
import torch.multiprocessing as mp

import asyncio
import torch
from typing import Dict, Optional, Any, List
import logging
import websockets
from websockets.exceptions import ConnectionClosed, ConnectionClosedError

logger = logging.getLogger(__name__)
import gc
import os

from .live_talking.basereal import BaseReal
from .live_talking.llm import llm_response
from .live_talking.websocket_server import get_server, initialize_server, shutdown_server
from .live_talking.lipreal_websocket import LipRealStream, LipRealStreamOptimized

import argparse
import random
import shutil
import time
import tempfile
from pathlib import Path

# Global module state shared by the route handlers below.
any4dh_reals: Dict[int, BaseReal] = {}  # sessionid -> live digital-human instance
opt = None  # runtime option object, populated by initialize_any4dh_basic()
model = None  # Wav2Lip model, populated by initialize_any4dh_basic()
avatar = None  # avatar assets, populated by initialize_any4dh_basic()
global_app = None  # global FastAPI app reference (set in __main__ mode)
websocket_server = None  # NOTE(review): declared global but never assigned in this module — confirm external use
websocket_connections: Dict[str, WebSocket] = {}  # connection-id -> active WebSocket
from .live_talking.websocket_media import MediaStreamer

def randN(N) -> int:
    """Return a uniformly random integer with exactly N decimal digits."""
    lower = 10 ** (N - 1)
    upper = 10 ** N - 1
    return random.randint(lower, upper)

def build_any4dh_real(sessionid: int, use_optimized: bool = True) -> BaseReal:
    """Build a digital-human instance bound to *sessionid*.

    NOTE(review): the ``use_optimized`` flag is currently ignored — a standard
    ``LipRealStream`` is always created (callers pass False with comments about
    avoiding event-loop issues), while ``LipRealStreamOptimized`` is imported
    but unused here. Confirm whether the flag should select it.
    """
    # Mutates the shared global option object with the session id.
    opt.sessionid = sessionid

    any4dh_real = LipRealStream(opt, model, avatar)

    return any4dh_real

class OfferRequest(BaseModel):
    # WebRTC-style offer payload; SDP values are synthetic since the real
    # transport is WebSocket (see the /any4dh/offer handler).
    sdp: str = "mock-sdp"
    type: str = "offer"

class HumanRequest(BaseModel):
    # Text interaction request for /any4dh/human.
    sessionid: int
    type: str  # 'echo' (speak text verbatim) or 'chat' (route through the LLM)
    text: Optional[str] = None
    interrupt: Optional[bool] = False  # flush current speech first when True

class InterruptRequest(BaseModel):
    # Request to interrupt the session's current speech output.
    sessionid: int

class SetAudioTypeRequest(BaseModel):
    # Request to switch the session's custom audio state.
    sessionid: int
    audiotype: int
    reinit: bool  # whether to reinitialize the custom state

class RecordRequest(BaseModel):
    # Start/stop recording request for /any4dh/record.
    sessionid: int
    type: str  # 'start_record' or 'end_record'

class IsSpeakingRequest(BaseModel):
    # Query whether the digital human is currently speaking.
    sessionid: int

class PlayAudioRequest(BaseModel):
    # Request to play an audio segment through the digital human.
    sessionid: int
    audio_url: str  # '/temp_audio/...' temp file or a 'data/...' pre-recorded path
    text: Optional[str] = None
    segment_index: Optional[int] = None  # segment ordering hint in streaming mode

class _CreateSessionRequest(BaseModel):
    # Internal session-creation options (see /any4dh/_internal/create_session).
    use_optimized: bool = True
    avatar_id: Optional[str] = None  # overrides the global avatar id when set

def register_any4dh_routes(app: FastAPI):
    """Register all any4dh digital-human HTTP routes on *app*."""

    @app.post("/any4dh/create_session")
    async def create_session():
        """Create a digital-human session.

        Returns the new session id and the WebSocket URL the client must
        connect to for media streaming.
        """
        try:
            # Generate a 6-digit numeric session id.
            sessionid = randN(6)

            # Build the instance off the event loop (model setup is blocking);
            # use_optimized=False: standard implementation avoids event-loop issues.
            any4dh_real = await asyncio.get_event_loop().run_in_executor(
                None, build_any4dh_real, sessionid, False
            )

            any4dh_reals[sessionid] = any4dh_real

            logger.info(f"Session {sessionid} created, waiting for WebSocket connection")

            return {
                "code": 0,
                "msg": "ok",
                "data": {
                    "sessionid": sessionid,
                    "websocket_url": f"/ws/{sessionid}",
                    "status": "created"
                }
            }

        except Exception as e:
            logger.exception('Failed to create session:')
            raise HTTPException(status_code=500, detail=f"Failed to create session: {str(e)}")

    @app.post("/any4dh/offer")
    async def offer(request: OfferRequest):
        """Session-creation endpoint (WebRTC 'offer' compatibility shim).

        The SDP fields in the response are synthetic; the actual transport
        is the WebSocket at the returned ``websocket_url``.
        """
        try:
            # Generate a 6-digit numeric session id.
            sessionid = randN(6)

            # Build the instance off the event loop (model setup is blocking);
            # use_optimized=False: standard implementation avoids event-loop issues.
            any4dh_real = await asyncio.get_event_loop().run_in_executor(
                None, build_any4dh_real, sessionid, False
            )

            any4dh_reals[sessionid] = any4dh_real

            logger.info(f"Session {sessionid} created via offer, waiting for WebSocket connection")
            logger.info(f'Created session {sessionid}, total sessions: {len(any4dh_reals)}')

            return {
                "sdp": f"websocket-session-{sessionid}",
                "type": "answer",
                "sessionid": sessionid,
                "transport": "websocket",
                "websocket_url": f"/ws/{sessionid}"
            }

        except Exception as e:
            logger.exception('Failed to create session via offer endpoint:')
            raise HTTPException(status_code=500, detail=f"Failed to create session: {str(e)}")

    # Internal endpoint — session management only.
    @app.post("/any4dh/_internal/create_session")
    async def _create_session(request: _CreateSessionRequest):
        """Internal session creation (allows avatar override and optimized mode)."""
        global websocket_server

        try:
            sessionid = randN(6)
            use_optimized = request.use_optimized

            # Override the shared avatar id; affects this and subsequent builds.
            if request.avatar_id:
                opt.avatar_id = request.avatar_id

            any4dh_real = await asyncio.get_event_loop().run_in_executor(
                None, build_any4dh_real, sessionid, use_optimized
            )

            any4dh_reals[sessionid] = any4dh_real

            return {
                "sessionid": sessionid,
                "status": "created"
            }

        except Exception as e:
            logger.exception('Failed to create internal session:')
            raise HTTPException(status_code=500, detail=f"Failed to create session: {str(e)}")

    @app.post("/any4dh/human")
    async def human(request: HumanRequest):
        """Handle a text interaction: start rendering if needed, then speak/chat."""
        try:
            sessionid = request.sessionid

            if sessionid not in any4dh_reals:
                raise HTTPException(status_code=404, detail="Session not found")

            # Optionally flush any in-progress speech before the new request.
            if request.interrupt:
                any4dh_reals[sessionid].flush_talk()

            # Lazily start the render loop on first interaction with this session.
            any4dh_real = any4dh_reals[sessionid]
            if not hasattr(any4dh_real, '_render_started') or not any4dh_real._render_started:
                logger.info(f"Starting render for session {sessionid} (text input)")
                import threading
                quit_event = threading.Event()
                any4dh_real._render_started = True
                any4dh_real._quit_event = quit_event

                # Rendering runs in its own daemon thread so it cannot block shutdown.
                render_thread = threading.Thread(
                    target=any4dh_real.render,
                    args=(quit_event,),
                    daemon=True,
                    name=f"render-{sessionid}"
                )
                render_thread.start()
                logger.info(f"Render thread started for session {sessionid} (text input)")
            else:
                logger.info(f"Render already running for session {sessionid}")

            # 'echo' speaks the text verbatim; 'chat' routes it through the LLM
            # in a worker thread (fire-and-forget).
            if request.type == 'echo':
                any4dh_reals[sessionid].put_msg_txt(request.text)
            elif request.type == 'chat':
                asyncio.get_event_loop().run_in_executor(None, llm_response, request.text, any4dh_reals[sessionid])

            return {"code": 0, "msg": "ok"}
        except HTTPException:
            raise
        except Exception as e:
            logger.exception('Human request processing failed:')
            return {"code": -1, "msg": str(e)}

    @app.post("/any4dh/humanaudio")
    async def humanaudio(
        sessionid: int = Form(...),
        file: UploadFile = File(...)
    ):
        """Feed an uploaded audio file into the session's digital human."""
        try:
            real = any4dh_reals.get(sessionid)
            if real is None:
                raise HTTPException(status_code=404, detail="Session not found")

            real.put_audio_file(await file.read())
            return {"code": 0, "msg": "ok"}
        except HTTPException:
            raise
        except Exception as e:
            logger.exception('Humanaudio processing failed:')
            return {"code": -1, "msg": str(e)}

    @app.post("/any4dh/interrupt_talk")
    async def interrupt_talk(request: InterruptRequest):
        """Interrupt the session's current speech output."""
        try:
            sessionid = request.sessionid

            if sessionid not in any4dh_reals:
                raise HTTPException(status_code=404, detail="Session not found")

            any4dh_reals[sessionid].flush_talk()
            return {"code": 0, "msg": "ok"}
        except HTTPException:
            # Consistency fix: propagate the 404 instead of swallowing it into a
            # 200 {"code": -1} response (matches /any4dh/human behavior).
            raise
        except Exception as e:
            logger.exception('Exception:')
            return {"code": -1, "msg": str(e)}
    
    @app.post("/any4dh/set_audiotype")
    async def set_audiotype(request: SetAudioTypeRequest):
        """Set the session's custom audio state (audiotype, optional reinit)."""
        try:
            sessionid = request.sessionid

            if sessionid not in any4dh_reals:
                raise HTTPException(status_code=404, detail="Session not found")

            any4dh_reals[sessionid].set_custom_state(request.audiotype, request.reinit)
            return {"code": 0, "msg": "ok"}
        except HTTPException:
            # Consistency fix: propagate the 404 instead of swallowing it into a
            # 200 {"code": -1} response (matches /any4dh/human behavior).
            raise
        except Exception as e:
            logger.exception('Exception:')
            return {"code": -1, "msg": str(e)}

    @app.post("/any4dh/record")
    async def record(request: RecordRequest):
        """Start or stop recording for a session ('start_record' / 'end_record')."""
        try:
            sessionid = request.sessionid

            if sessionid not in any4dh_reals:
                raise HTTPException(status_code=404, detail="Session not found")

            if request.type == 'start_record':
                any4dh_reals[sessionid].start_recording()
            elif request.type == 'end_record':
                any4dh_reals[sessionid].stop_recording()
            # Unknown types fall through silently and still return ok.
            return {"code": 0, "msg": "ok"}
        except HTTPException:
            # Consistency fix: propagate the 404 instead of swallowing it into a
            # 200 {"code": -1} response (matches /any4dh/human behavior).
            raise
        except Exception as e:
            logger.exception('exception:')
            return {"code": -1, "msg": str(e)}

    @app.post("/any4dh/is_speaking")
    async def is_speaking(request: IsSpeakingRequest):
        """Report whether the session's digital human is currently speaking."""
        real = any4dh_reals.get(request.sessionid)
        if real is None:
            raise HTTPException(status_code=404, detail="Session not found")

        return {"code": 0, "data": real.is_speaking()}

    @app.post("/any4dh/voice-chat")
    async def voice_chat(
        file: UploadFile = File(...),
        sessionid: Optional[str] = Form(None)
    ):
        """Voice chat pipeline: audio upload -> ASR -> LLM -> TTS -> lip sync.

        The synthesized audio is pushed into the digital-human session for
        lip-synced playback; no audio URL is returned to the frontend.

        Bug fix: the temporary TTS file was previously deleted immediately
        after syncing (before step 5), so the duration measurement always
        failed; the duplicated failed-sync warning was also removed. Cleanup
        now happens once, after the duration has been measured.
        """
        try:
            logger.info(f"Received voice chat request: file={file.filename if file else 'None'}, sessionid={sessionid}")
            # Enforce a 10MB upload limit.
            max_file_size = 10 * 1024 * 1024  # 10MB
            file_content = await file.read()

            if len(file_content) > max_file_size:
                return {
                    "success": False,
                    "error": "音频文件过大，请限制在10MB以内"
                }

            if len(file_content) == 0:
                return {
                    "success": False,
                    "error": "音频文件为空"
                }

            # 1. ASR speech recognition
            recognized_text = await transcribe_audio(file_content)

            if not recognized_text or not recognized_text.strip():
                return {
                    "success": False,
                    "error": "语音识别结果为空，请说话清晰一些"
                }

            logger.info(f"ASR recognition result: {recognized_text}")

            # 2. LLM chat
            response_text = await process_llm_chat(recognized_text)

            if not response_text or not response_text.strip():
                response_text = "抱歉，我现在无法回答这个问题。"

            logger.info(f"LLM response result: {response_text}")

            # 3. TTS synthesis
            audio_url = await synthesize_speech(response_text)

            # Temp files live in the working directory: "/temp_audio/<name>" maps
            # to "<name>". Compute the local path once.
            if audio_url.startswith('/temp_audio/'):
                audio_file_path = audio_url[len('/temp_audio/'):]
            else:
                audio_file_path = audio_url

            # 4. Push the audio into an active digital-human session for lip sync.
            audio_synced = False

            sessionid_int = None
            if sessionid:
                try:
                    sessionid_int = int(sessionid)
                except (ValueError, TypeError):
                    logger.warning(f"Invalid sessionid format: {sessionid}")

            if sessionid_int and sessionid_int in any4dh_reals:
                try:
                    if os.path.exists(audio_file_path):
                        with open(audio_file_path, 'rb') as f:
                            audio_bytes = f.read()
                        any4dh_reals[sessionid_int].put_audio_file(audio_bytes)
                        audio_synced = True
                        # Do NOT delete the file here — step 5 still needs it to
                        # measure the duration; final cleanup happens below.
                except Exception as e:
                    logger.warning(f"Failed to sync audio to digital human: {e}")
            else:
                active_sessions = list(any4dh_reals.keys())
                logger.warning(f"Failed to sync audio: sessionid={sessionid}, active sessions={active_sessions}")

                # Fall back to the most recently created session, if any.
                if active_sessions:
                    latest_session = active_sessions[-1]
                    try:
                        if os.path.exists(audio_file_path):
                            with open(audio_file_path, 'rb') as f:
                                audio_bytes = f.read()
                            any4dh_reals[latest_session].put_audio_file(audio_bytes)
                            audio_synced = True
                    except Exception as e:
                        logger.warning(f"Failed to sync audio to latest digital human: {e}")

            # 5. Measure the audio duration (milliseconds).
            audio_duration = 0
            if audio_synced:
                try:
                    import librosa
                    if os.path.exists(audio_file_path):
                        y, sr = librosa.load(audio_file_path)
                        audio_duration = int(librosa.get_duration(y=y, sr=sr) * 1000)  # ms
                except ImportError:
                    # Without librosa, estimate from file size (~32kbps average).
                    if os.path.exists(audio_file_path):
                        file_size = os.path.getsize(audio_file_path)
                        audio_duration = (file_size * 8) // (32 * 1000) * 1000  # estimated ms
                except Exception as e:
                    logger.warning(f"Failed to get audio duration: {e}")
                    audio_duration = 15000  # default: 15s

            # Clean up the temporary file once it is no longer needed.
            if audio_url.startswith('/temp_audio/'):
                try:
                    if os.path.exists(audio_file_path):
                        os.remove(audio_file_path)
                except Exception as e:
                    logger.warning(f"Failed to clean up temporary file: {e}")

            # 6. Full result — never return an audio URL to the frontend; playback
            # always goes through the digital human.
            return {
                "success": True,
                "recognized_text": recognized_text,
                "response_text": response_text,
                "audio_url": None,
                "audio_synced": audio_synced,
                "audio_file": audio_url.split('/')[-1] if audio_synced else None,  # filename for status display
                "audio_duration": audio_duration,  # milliseconds
                "session_id": sessionid or "voice_chat_session",
                "timestamp": int(time.time())
            }

        except HTTPException as he:
            logger.error(f"HTTP exception: {he.status_code} - {he.detail}")
            return {
                "success": False,
                "error": f"HTTP错误 {he.status_code}: {he.detail}"
            }
        except Exception as e:
            logger.exception('Voice chat processing exception:')
            return {
                "success": False,
                "error": f"语音处理失败: {str(e)}"
            }

    @app.post("/any4dh/voice-chat-stream")
    async def voice_chat_stream(
        file: UploadFile = File(...),
        sessionid: Optional[str] = Form(None)
    ):
        """Streaming voice chat: upload -> ASR -> streaming LLM -> segmented TTS.

        Emits Server-Sent-Events-style ``data: ...`` lines so the client hears
        the AI reply incrementally through the digital human.
        """
        try:
            # Enforce a 10MB upload limit.
            max_file_size = 10 * 1024 * 1024  # 10MB
            file_content = await file.read()

            if len(file_content) > max_file_size:
                async def error_response():
                    yield f"data: {json.dumps({'type': 'error', 'message': '音频文件过大，请限制在10MB以内'})}\n\n"
                return StreamingResponse(error_response(), media_type="text/plain")

            if len(file_content) == 0:
                async def error_response():
                    yield f"data: {json.dumps({'type': 'error', 'message': '音频文件为空'})}\n\n"
                return StreamingResponse(error_response(), media_type="text/plain")

            # 1. ASR speech recognition
            recognized_text = await transcribe_audio(file_content)

            if not recognized_text or not recognized_text.strip():
                async def error_response():
                    yield f"data: {json.dumps({'type': 'error', 'message': '语音识别结果为空，请说话清晰一些'})}\n\n"
                return StreamingResponse(error_response(), media_type="text/plain")

            logger.info(f"ASR recognition result: {recognized_text}")

            # 2. Create the streaming TTS processor for this session.
            from .streaming_utils import StreamingTTSProcessor
            processor = StreamingTTSProcessor(sessionid, any4dh_reals)

            async def generate_stream():
                try:
                    async for result in processor.process_streaming_response(recognized_text):
                        # Emit each result as an SSE-formatted data line.
                        yield f"data: {json.dumps(result, ensure_ascii=False)}\n\n"
                except Exception as e:
                    logger.error(f"Stream processing exception: {str(e)}")
                    yield f"data: {json.dumps({'type': 'error', 'message': f'处理异常: {str(e)}'})}\n\n"

            return StreamingResponse(
                generate_stream(),
                media_type="text/plain",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "Access-Control-Allow-Origin": "*"
                }
            )

        except HTTPException as he:
            logger.error(f"HTTP exception: {he.status_code} - {he.detail}")

            async def error_response():
                yield f"data: {json.dumps({'type': 'error', 'message': f'HTTP错误 {he.status_code}: {he.detail}'})}\n\n"
            return StreamingResponse(error_response(), media_type="text/plain")

        except Exception as e:
            logger.exception('Stream voice chat processing exception:')

            async def error_response():
                yield f"data: {json.dumps({'type': 'error', 'message': f'语音处理失败: {str(e)}'})}\n\n"
            return StreamingResponse(error_response(), media_type="text/plain")

    @app.post("/any4dh/play-audio")
    async def play_audio(request: PlayAudioRequest):
        """Play an audio segment through the digital human.

        Used in streaming mode to feed TTS segments into the session;
        temporary files are deleted right after being handed over.
        """
        try:
            sessionid = request.sessionid

            if sessionid not in any4dh_reals:
                logger.warning(f"Session {sessionid} not found for audio playback")
                return {"code": -1, "msg": "Session not found"}

            # Resolve the audio file path from the request URL.
            audio_url = request.audio_url
            if not audio_url:
                return {"code": -1, "msg": "Audio URL is required"}

            # Map the URL onto a local path depending on its prefix.
            if audio_url.startswith('/temp_audio/'):
                # Temporary audio file (written to the working directory).
                filename = audio_url[len('/temp_audio/'):]
                full_audio_path = filename
            elif audio_url.startswith('data/'):
                # Pre-recorded audio file.
                full_audio_path = audio_url
            else:
                # Anything else: use the path as-is.
                full_audio_path = audio_url

            if not os.path.exists(full_audio_path):
                logger.error(f"Audio file not found: {full_audio_path}")
                return {"code": -1, "msg": f"Audio file not found: {full_audio_path}"}

            # Read the file and hand it to the digital-human instance.
            try:
                with open(full_audio_path, 'rb') as f:
                    audio_bytes = f.read()

                # Push the audio into the digital-human instance.
                any4dh_reals[sessionid].put_audio_file(audio_bytes)

                # Temporary files are deleted immediately after hand-over.
                if audio_url.startswith('/temp_audio/'):
                    try:
                        os.remove(full_audio_path)
                    except Exception as e:
                        logger.warning(f"Failed to clean up temporary file: {e}")
                
                return {"code": 0, "msg": "Audio playback started successfully"}

            except Exception as audio_error:
                logger.error(f"Failed to process audio file: {str(audio_error)}")
                return {"code": -1, "msg": f"Failed to process audio: {str(audio_error)}"}

        except Exception as e:
            logger.exception('Audio playback exception:')
            return {"code": -1, "msg": f"Audio playback failed: {str(e)}"}

    # Temporary audio file access endpoint.
    @app.get("/temp_audio/{filename}")
    async def get_temp_audio(filename: str):
        """Serve a temporary audio file and schedule it for deletion.

        Bug fix: the route path previously contained a literal placeholder
        string instead of the ``{filename}`` template, so the path parameter
        could never bind and the route was unreachable. NOTE: since routes
        match in registration order, this parameterized GET now also captures
        the literal path "/temp_audio/status" registered further below.
        """
        # Temporary audio files are written directly to the working directory.
        file_path = filename

        if not os.path.exists(file_path):
            raise HTTPException(status_code=404, detail="Temporary audio file not found")

        # File response that removes the file once it has been sent.
        return file_response_with_cleanup(
            file_path,
            media_type="audio/mpeg",
            filename=filename,
            cleanup_file=file_path
        )

    # Temporary-file manager status endpoint.
    @app.get("/temp_audio/status")
    async def get_temp_file_status():
        """Return the shared temp-file manager's current status."""
        from core.tts.temp_file_manager import get_temp_file_manager
        return get_temp_file_manager().get_status()

    # Temporary-file cleanup endpoint.
    @app.post("/temp_audio/cleanup")
    async def cleanup_temp_files():
        """Manually trigger cleanup of all temporary files."""
        from core.tts.temp_file_manager import get_temp_file_manager
        get_temp_file_manager().cleanup_all()
        return {"message": "Temporary files cleanup initiated"}

    # Voice knowledge-base file serving endpoint.
    @app.get("/any4dh/voice/{audio_file}")
    async def get_voice_file(audio_file: str):
        """Serve an audio file from the voice knowledge base."""
        from .voice_file_service import VoiceFileService
        return VoiceFileService.serve_voice_file(audio_file)

    @app.get("/any4dh/voice/info/{audio_file}")
    async def get_voice_file_info(audio_file: str):
        """Return metadata about a voice knowledge-base file."""
        from .voice_file_service import VoiceFileService
        return VoiceFileService.get_voice_file_info(audio_file)

# Voice-chat helper functions
async def transcribe_audio(file_content: bytes) -> str:
    """Transcribe raw audio bytes with the shared ASR model.

    Returns the post-processed transcript, or "" on any failure
    (errors are logged, never raised).
    """
    try:
        import torchaudio
        from io import BytesIO
        from funasr.utils.postprocess_utils import rich_transcription_postprocess
        from core.model_manager import ModelManager

        # Decode the uploaded bytes directly from memory.
        data, fs = torchaudio.load(BytesIO(file_content))
        data = data.mean(0)  # downmix to mono

        # Fetch the shared ASR model and run inference.
        m, kwargs = ModelManager.get_asr_model()
        res = m.inference(
            data_in=[data],
            language="auto",
            use_itn=False,
            ban_emo_unk=False,
            key=["voice_chat"],
            fs=fs,
            **kwargs,
        )

        if not res or not res[0]:
            logger.warning("ASR returned empty result")
            return ""

        # Strip model sentinel tokens, then normalize the transcript.
        # NOTE(review): "<|endofttranscript|>" (double 't') looks like a typo
        # for the model's end token — confirm against the ASR model's vocabulary.
        raw_text = res[0][0]["text"]
        final_text = rich_transcription_postprocess(raw_text.replace("<|startoftranscript|>", "").replace("<|endofttranscript|>", ""))

        return final_text.strip()

    except Exception as e:
        logger.error(f"ASR processing failed: {str(e)}")
        return ""

async def process_llm_chat(text: str) -> str:
    """Run the LLM chat step and return the reply text.

    Falls back to a canned apology string on any failure, including an
    empty model response.
    """
    try:
        from core.chat.llm import llm_service

        def llm_call():
            # Bug fix: the original assigned ``llm_service = llm_service()``,
            # which made ``llm_service`` local to this closure and raised
            # UnboundLocalError before the factory ever ran.
            service = llm_service()
            # Drive the async client from this worker thread with a private
            # event loop (no loop is running in executor threads).
            loop = asyncio.new_event_loop()
            try:
                return loop.run_until_complete(service.generate_response(text))
            finally:
                loop.close()

        response = await asyncio.get_event_loop().run_in_executor(None, llm_call)
        return response if response else "抱歉，我现在无法回答这个问题。"

    except Exception as e:
        logger.error(f"LLM processing failed: {str(e)}")
        return "抱歉，我现在无法回答这个问题。"

async def synthesize_speech(text: str) -> str:
    """Synthesize *text* to speech and return its "/temp_audio/<file>" URL.

    Tries IndexTTS first (when enabled in config), then falls back to
    edge-tts. Unused local imports from the original were removed.

    Raises:
        Exception: if both synthesis backends fail.
    """
    try:
        from core.tts.index_tts_engine import IndexTTSEngine
        from config import Config

        # Allocate the output file through the shared temp-file manager.
        from core.tts.temp_file_manager import create_temp_voice_output_file
        output_path = create_temp_voice_output_file()
        filename = os.path.basename(output_path)

        # Preferred backend: IndexTTS.
        if Config.INDEX_TTS_MODEL_ENABLED:
            try:
                def tts_call():
                    index_tts_engine = IndexTTSEngine.get_instance({
                        'model_path': Config.INDEX_TTS_MODEL_DIR,
                        'device': Config.INDEX_TTS_DEVICE
                    })
                    return index_tts_engine.generate_speech(
                        text=text,
                        output_path=str(output_path),
                        voice="default"
                    )

                success = await asyncio.get_event_loop().run_in_executor(None, tts_call)

                if success and os.path.exists(output_path):
                    # Bug fix: the URL previously contained a literal
                    # placeholder instead of the generated filename.
                    return f"/temp_audio/{filename}"
                else:
                    logger.warning("IndexTTS synthesis failed, trying edge-tts")

            except Exception as e:
                logger.warning(f"IndexTTS synthesis failed: {e}, trying edge-tts")

        # Fallback backend: edge-tts.
        def edge_tts_call():
            from edge_tts import Communicate

            # edge-tts is async; drive it with a private loop in this worker thread.
            loop = asyncio.new_event_loop()
            try:
                communicate = Communicate(text, Config.EDGE_DEFAULT_VOICE)
                loop.run_until_complete(communicate.save(str(output_path)))
                return True
            finally:
                loop.close()

        success = await asyncio.get_event_loop().run_in_executor(None, edge_tts_call)

        if success and os.path.exists(output_path):
            # Bug fix: same placeholder-URL defect as above.
            return f"/temp_audio/{filename}"
        else:
            logger.error("TTS speech synthesis failed")
            raise Exception("TTS语音合成失败")

    except Exception as e:
        logger.error(f"TTS processing failed: {str(e)}")
        raise Exception(f"语音合成失败: {str(e)}")

# Global one-shot initialization flag; guards initialize_any4dh_basic().
_any4dh_initialized = False

async def initialize_any4dh(config=None, host="0.0.0.0", port=8888):
    """Async entry point that initializes the any4dh system.

    NOTE(review): ``host`` and ``port`` are accepted but unused — confirm
    whether callers expect a server to be started here.
    """
    global opt, model, avatar, _any4dh_initialized
    opt, model, avatar = initialize_any4dh_basic(config)
    return opt, model, avatar

def initialize_any4dh_basic(config=None):
    """Initialize the any4dh system: build options, load model and avatar.

    Idempotent — a second call returns the already-initialized globals.
    Returns the (opt, model, avatar) triple and stores it in module globals.
    """
    global opt, model, avatar, _any4dh_initialized

    if _any4dh_initialized:
        logger.info("any4dh already initialized, skipping")
        return opt, model, avatar

    if config is None:
        # Read configuration from the project's config module.
        import sys
        # Project root (any4any/): three levels above this file.
        project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        if project_root not in sys.path:
            sys.path.insert(0, project_root)
        from config import Config

        config = Config()

    # Build the runtime option object from the config.
    class OptConfig:
        def __init__(self, cfg):
            self.fps = cfg.ANY4DH_FPS
            self.W = 450
            self.H = 450
            self.avatar_id = cfg.ANY4DH_AVATAR_ID
            self.batch_size = cfg.ANY4DH_BATCH_SIZE
            self.customvideo_config = ''
            self.tts = cfg.ANY4DH_TTS
            self.REF_FILE = cfg.ANY4DH_REF_FILE
            self.REF_TEXT = cfg.ANY4DH_REF_TEXT
            self.TTS_SERVER = cfg.ANY4DH_TTS_SERVER
            self.model = cfg.ANY4DH_MODEL
            self.transport = getattr(cfg, 'ANY4DH_TRANSPORT', 'stream')
            self.customopt = []
            self.l = 10
            self.m = 8
            self.r = 10
            # NOTE(review): this reads the module-level ``opt``, which is still
            # None during first initialization, so the default 8888 is always
            # used — presumably ``cfg`` was intended; confirm.
            self.listenport = getattr(opt, 'listenport', 8888)

            # TTS engine configuration
            self.tts_engine = cfg.ANY4DH_TTS
            self.index_tts_model_dir = cfg.INDEX_TTS_MODEL_DIR
            self.index_tts_device = cfg.INDEX_TTS_DEVICE

    opt = OptConfig(config)

    # The multiprocessing start method is configured by the main application;
    # do not set it again here.
    # mp.set_start_method('spawn')  # set centrally by the main app (app.py)

    # Wav2Lip model initialization.
    from .live_talking.lipreal import LipReal, load_model, load_avatar, warm_up
    logger.info(f"Initializing any4dh with avatar_id: {opt.avatar_id}, transport: {opt.transport}")

    # Project root, used to resolve relative asset paths.
    project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    # Resolve the model directory to an absolute path.
    model_path = config.ANY4DH_WAV2LIP_MODEL_DIR
    if not os.path.isabs(model_path):
        model_path = os.path.join(project_root, model_path)

    avatar_path = os.path.join(config.ANY4DH_AVATARS_DIR, opt.avatar_id)
    if not os.path.isabs(avatar_path):
        avatar_path = os.path.join(project_root, avatar_path)

    model = load_model(model_path)
    avatar = load_avatar(avatar_path)
    warm_up(opt.batch_size, model, 256)

    _any4dh_initialized = True
    return opt, model, avatar

async def shutdown_any4dh():
    """Shut down the any4dh system: flush sessions, close WebSockets, clear state."""
    global any4dh_reals, websocket_connections

    logger.info("Shutting down any4dh system...")

    # Flush any pending speech in every live session.
    for sessionid in list(any4dh_reals.keys()):
        if sessionid in any4dh_reals:
            any4dh_reals[sessionid].flush_talk()

    # Best-effort close of all WebSocket connections.
    for connection in websocket_connections.values():
        try:
            await connection.close()
        except Exception:
            # Fix: bare `except:` narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed during shutdown.
            pass

    any4dh_reals.clear()
    websocket_connections.clear()
    logger.info("any4dh system shutdown complete")

# RTMP push-stream mode helpers
async def post(url, data):
    """POST *data* to *url* and return the response body text.

    Returns None when the request fails; the failure is logged.
    """
    try:
        import aiohttp
        async with aiohttp.ClientSession() as session:
            async with session.post(url, data=data) as response:
                return await response.text()
    except Exception as e:
        # Fix: failures were logged at info level; use error and return
        # None explicitly instead of falling off the end of the function.
        logger.error(f'Error: {e}')
        return None

async def run_push(push_url, sessionid):
    """Run RTMP push-stream mode: build a session and push it via WebRTC/WHIP.

    NOTE(review): RTCPeerConnection, RTCSessionDescription, HumanPlayer and
    the `pcs` set are not defined or imported anywhere in this file's visible
    scope (they look like aiortc leftovers) — confirm they are provided
    elsewhere before enabling the 'rtcpush' transport.
    """
    # Build the digital-human instance off the event loop (model loading is blocking).
    any4dh_real = await asyncio.get_event_loop().run_in_executor(None, build_any4dh_real, sessionid)
    any4dh_reals[sessionid] = any4dh_real

    pc = RTCPeerConnection()
    pcs.add(pc)

    @pc.on("connectionstatechange")
    async def on_connectionstatechange():
        # Drop failed peer connections from the global set.
        if pc.connectionState == "failed":
            await pc.close()
            pcs.discard(pc)

    # Attach the session's audio/video tracks to the peer connection.
    player = HumanPlayer(any4dh_reals[sessionid])
    audio_sender = pc.addTrack(player.audio)
    video_sender = pc.addTrack(player.video)

    # WHIP-style handshake: create local offer -> POST SDP to push URL ->
    # apply the returned SDP as the remote answer.
    await pc.setLocalDescription(await pc.createOffer())
    answer = await post(push_url, pc.localDescription.sdp)
    await pc.setRemoteDescription(RTCSessionDescription(sdp=answer, type='answer'))

# 独立运行模式（用于测试）
# Standalone run mode (for testing)
if __name__ == '__main__':
    from contextlib import asynccontextmanager

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        """Application lifespan manager for standalone runs."""
        yield  # application runs here

        # Cleanup on shutdown.
        # NOTE(review): `pcs` is not defined anywhere visible in this file
        # (aiortc leftover) — confirm it exists before relying on this path.
        coros = [pc.close() for pc in pcs]
        await asyncio.gather(*coros)
        pcs.clear()

    # Create the FastAPI application.
    app = FastAPI(
        title="any4dh API",
        description="基于 Wav2Lip 的实时交互数字人系统",
        version="1.0.0",
        lifespan=lifespan
    )

    # Keep a module-level reference so main() can hand the app to uvicorn.
    global_app = app

    # CORS middleware.  NOTE: "websocket" is not an HTTP method and CORS does
    # not apply to WebSocket upgrades — the entry is harmless but inert.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "websocket"],
        allow_headers=["*"],
    )

    # Static file serving and templates.
    app.mount("/static", StaticFiles(directory="static"), name="static")
    templates = Jinja2Templates(directory="static")

    # Register HTTP and WebSocket routes.
    # NOTE(review): register_websocket_routes is defined *below* this block,
    # so this call raises NameError when the module is executed standalone —
    # the definitions (or these calls) need to be reordered.
    register_any4dh_routes(app)
    register_websocket_routes(app)

    parser = argparse.ArgumentParser()

    # Audio frame rate.
    parser.add_argument('--fps', type=int, default=50, help="音频帧率，必须为50")
    # Sliding-window parameters.
    parser.add_argument('-l', type=int, default=10)
    parser.add_argument('-m', type=int, default=8)
    parser.add_argument('-r', type=int, default=10)

    # GUI window size.
    parser.add_argument('--W', type=int, default=450, help="GUI宽度")
    parser.add_argument('--H', type=int, default=450, help="GUI高度")

    # Wav2Lip parameters.
    parser.add_argument('--avatar_id', type=str, default='001', help="指定data/avatars中的数字人")
    parser.add_argument('--batch_size', type=int, default=16, help="推理批量大小")

    parser.add_argument('--customvideo_config', type=str, default='', help="自定义动作JSON配置")

    parser.add_argument('--tts', type=str, default='edgetts', help="TTS服务类型")
    parser.add_argument('--REF_FILE', type=str, default="zh-CN-YunxiaNeural", help="参考文件名或语音模型ID")
    parser.add_argument('--REF_TEXT', type=str, default=None)
    parser.add_argument('--TTS_SERVER', type=str, default='http://127.0.0.1:9880')

    parser.add_argument('--model', type=str, default='wav2lip')

    parser.add_argument('--transport', type=str, default='stream', help="传输方式: stream, virtualcam")
    parser.add_argument('--host', type=str, default='0.0.0.0', help="服务器主机地址")
    parser.add_argument('--port', type=int, default=8888, help="服务器端口")
    parser.add_argument('--push_url', type=str, default='http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream')

    parser.add_argument('--max_session', type=int, default=1)  # number of concurrent sessions
    parser.add_argument('--listenport', type=int, default=8888, help="Web监听端口（集成模式使用8888，独立模式可指定其他端口）")
    # BUG FIX: a second `--host` argument was registered here; argparse raises
    # argparse.ArgumentError for duplicate option strings, so it was removed.

    opt = parser.parse_args()

    # Store the parsed configuration globally so helpers see the same opt.
    globals()['opt'] = opt

    # Load optional custom-action configuration (JSON file).
    opt.customopt = []
    if opt.customvideo_config != '':
        with open(opt.customvideo_config, 'r') as file:
            opt.customopt = json.load(file)

def register_websocket_routes(app: FastAPI):
    """Register the per-session WebSocket streaming endpoint on *app*.

    The endpoint attaches a MediaStreamer to an existing session, starts the
    render thread once per session, then keeps the connection open and
    dispatches incoming JSON messages to process_websocket_message().
    """

    @app.websocket("/ws/{sessionid}")
    async def websocket_endpoint(websocket: WebSocket, sessionid: str):
        """WebSocket connection endpoint: media streaming + control messages."""
        try:
            await websocket.accept()
        except Exception as e:
            logger.error(f"Failed to accept WebSocket connection: {e}")
            return

        try:
            # The path parameter arrives as a string; sessions are keyed by int.
            try:
                sessionid_int = int(sessionid)
            except ValueError:
                logger.error(f"Invalid sessionid format: {sessionid}")
                await websocket.send_json({
                    'type': 'error',
                    'error': f'Invalid sessionid format: {sessionid}'
                })
                await websocket.close()
                return

            # The session must already exist (created via the HTTP offer route).
            if sessionid_int not in any4dh_reals:
                logger.error(f"Session {sessionid_int} not found")
                logger.error(f"Available sessions: {list(any4dh_reals.keys())}")
                await websocket.send_json({
                    'type': 'error',
                    'error': f'Session {sessionid_int} not found'
                })
                await websocket.close()
                return

            # Get the digital-human instance for this session.
            any4dh_real = any4dh_reals[sessionid_int]

            # Wire a media streamer into the instance so the render loop can
            # push frames over this WebSocket.
            streamer = MediaStreamer(sessionid_int)
            any4dh_real.set_media_streamer(streamer)
            any4dh_real.set_async_mode(True)

            # Registering the client also starts the streamer's sender loop.
            await streamer.register_client(websocket)

            # Start rendering exactly once per session.
            try:
                if not getattr(any4dh_real, '_render_started', False):
                    quit_event = Event()  # thread-safe shutdown signal
                    any4dh_real._render_started = True
                    any4dh_real._quit_event = quit_event
                    # Save the loop so the render thread can schedule sends on it.
                    any4dh_real._session_loop = asyncio.get_running_loop()

                    # Rendering is CPU/GPU-bound, so it runs in its own thread.
                    render_thread = Thread(
                        target=any4dh_real.render,
                        args=(quit_event,),
                        daemon=True,
                        name=f"render-{sessionid_int}"
                    )
                    render_thread.start()

                    # Give the render thread a moment to produce its first
                    # frames.  BUG FIX: was time.sleep(0.1), which blocked the
                    # entire event loop (and every other connection) for 100ms.
                    await asyncio.sleep(0.1)
                else:
                    logger.info(f"Render already running for session {sessionid_int}")
            except Exception as e:
                logger.error(f"Error during render startup for session {sessionid_int}: {e}")
                import traceback
                logger.error(f"Render startup traceback: {traceback.format_exc()}")

            # Track the live connection for this session.
            websocket_connections[sessionid_int] = websocket

            # Confirm the connection to the client.
            await websocket.send_json({
                'type': 'session_joined',
                'sessionid': sessionid,
                'status': 'success',
                'timestamp': asyncio.get_running_loop().time()
            })

            logger.info(f"WebSocket connection established for session {sessionid}")

            # Receive loop: short timeouts keep it responsive without starving
            # the render thread's outbound frames.
            try:
                while True:
                    try:
                        data = await asyncio.wait_for(websocket.receive_text(), timeout=0.1)
                        await process_websocket_message(websocket, sessionid, data)
                    except asyncio.TimeoutError:
                        # Timeouts are expected; just poll again.
                        continue
                    except WebSocketDisconnect:
                        logger.info(f"WebSocket client disconnected for session {sessionid}")
                        break
            except WebSocketDisconnect:
                logger.info(f"WebSocket client disconnected for session {sessionid}")
            except Exception as e:
                logger.error(f"WebSocket error for session {sessionid}: {e}")
                import traceback
                logger.error(f"WebSocket error traceback: {traceback.format_exc()}")
            finally:
                # Always detach this connection from the registry.
                websocket_connections.pop(sessionid_int, None)

                # Unregister the WebSocket (this stops the sender loop).
                try:
                    await streamer.unregister_client()
                    logger.info(f"MediaStreamer unregistered for session {sessionid_int}")
                except Exception as e:
                    logger.error(f"Error unregistering MediaStreamer for session {sessionid_int}: {e}")

                logger.info(f"WebSocket cleanup completed for session {sessionid_int}")

        except Exception as e:
            logger.error(f"Error handling WebSocket connection: {e}")
            try:
                await websocket.close()
            except Exception:
                # BUG FIX: was a bare `except:`; the close is best-effort only.
                pass

async def process_websocket_message(websocket: WebSocket, sessionid: str, message: str):
    """Dispatch one inbound WebSocket JSON *message* for *sessionid*.

    Supported message types: join_session, text_input, audio_input,
    interrupt, ping, get_status, set_audiotype.  Every branch replies with a
    JSON message; failures are reported as {'type': 'error', ...} instead of
    raised, so the connection stays alive.
    """
    try:
        data = json.loads(message)
        msg_type = data.get('type')

        # Sessions are keyed by int; the path parameter is a string.
        try:
            sessionid_int = int(sessionid)
        except ValueError:
            await websocket.send_json({
                'type': 'error',
                'error': f'Invalid sessionid format: {sessionid}'
            })
            return

        any4dh_real = any4dh_reals.get(sessionid_int)

        if not any4dh_real:
            await websocket.send_json({
                'type': 'error',
                'error': f'Session {sessionid_int} not found'
            })
            return

        # NOTE: asyncio.get_event_loop() inside a coroutine is deprecated;
        # get_running_loop() is the supported equivalent throughout.
        if msg_type == 'join_session':
            # Client acknowledgement that it has joined the session.
            logger.info(f"Client joined session {sessionid_int}")
            await websocket.send_json({
                'type': 'session_joined',
                'status': 'success',
                'sessionid': sessionid_int,
                'timestamp': asyncio.get_running_loop().time()
            })

        elif msg_type == 'text_input':
            text = data.get('text', '')
            input_type = data.get('input_type', 'echo')

            if input_type == 'echo':
                # Speak the text verbatim.
                any4dh_real.put_msg_txt(text)
            elif input_type == 'chat':
                # Run the blocking LLM call off the event loop (fire-and-forget).
                asyncio.get_running_loop().run_in_executor(None, llm_response, text, any4dh_real)

            # Acknowledge receipt to the client.
            await websocket.send_json({
                'type': 'text_input_received',
                'sessionid': sessionid_int,
                'text': text,
                'input_type': input_type,
                'timestamp': asyncio.get_running_loop().time()
            })

        elif msg_type == 'audio_input':
            # Audio arrives base64-encoded inside the JSON payload.
            audio_data = data.get('audio_data', '')

            try:
                audio_bytes = base64.b64decode(audio_data)
                any4dh_real.put_audio_file(audio_bytes)

                await websocket.send_json({
                    'type': 'audio_input_received',
                    'sessionid': sessionid_int,
                    'audio_size': len(audio_bytes),
                    'timestamp': asyncio.get_running_loop().time()
                })
            except Exception as e:
                logger.error(f"Error processing audio input: {e}")
                await websocket.send_json({
                    'type': 'error',
                    'error': f"Audio input processing failed: {str(e)}"
                })

        elif msg_type == 'interrupt':
            # Stop any in-progress speech for this session.
            try:
                any4dh_real.flush_talk()
                await websocket.send_json({
                    'type': 'interrupt_completed',
                    'sessionid': sessionid_int,
                    'timestamp': asyncio.get_running_loop().time()
                })
            except Exception as e:
                logger.error(f"Error processing interrupt: {e}")
                await websocket.send_json({
                    'type': 'error',
                    'error': f"Interrupt processing failed: {str(e)}"
                })

        elif msg_type == 'ping':
            # Echo back the client timestamp for RTT measurement.
            await websocket.send_json({
                'type': 'pong',
                'timestamp': asyncio.get_running_loop().time(),
                'client_timestamp': data.get('timestamp')
            })

        elif msg_type == 'get_status':
            # Report connection stats plus the speaking flag.
            streamer = any4dh_real.media_streamer if hasattr(any4dh_real, 'media_streamer') else None
            stats = streamer.get_connection_stats() if streamer else {}

            stats['is_speaking'] = any4dh_real.is_speaking()

            await websocket.send_json({
                'type': 'status_response',
                'sessionid': sessionid_int,
                'stats': stats,
                'timestamp': asyncio.get_running_loop().time()
            })

        elif msg_type == 'set_audiotype':
            # Switch the custom state/audio type (e.g. idle animations).
            audiotype = data.get('audiotype')
            reinit = data.get('reinit', True)

            try:
                any4dh_real.set_custom_state(audiotype, reinit)
                await websocket.send_json({
                    'type': 'audiotype_set',
                    'sessionid': sessionid_int,
                    'audiotype': audiotype,
                    'reinit': reinit,
                    'timestamp': asyncio.get_running_loop().time()
                })
            except Exception as e:
                logger.error(f"Error setting audiotype: {e}")
                await websocket.send_json({
                    'type': 'error',
                    'error': f"Set audiotype failed: {str(e)}"
                })

        else:
            logger.warning(f"Unknown message type: {msg_type}")
            await websocket.send_json({
                'type': 'error',
                'error': f"Unknown message type: {msg_type}"
            })

    except json.JSONDecodeError:
        logger.error("Invalid JSON message received")
        await websocket.send_json({
            'type': 'error',
            'error': "Invalid JSON message"
        })
    except Exception as e:
        logger.error(f"Error processing message: {e}")
        await websocket.send_json({
            'type': 'error',
            'error': f"Message processing error: {str(e)}"
        })

# 主程序入口
def main():
    """Main entry point: initialize the runtime and serve with uvicorn."""
    # Initialize any4dh (models, avatar, configuration).
    initialize_any4dh_basic()

    logger.info(f'Starting FastAPI server: http://<serverip>:{opt.listenport}/dh/dashboard')

    # Start the server.
    import uvicorn

    # rtcpush mode needs extra startup/shutdown tasks via a custom lifespan.
    if opt.transport == 'rtcpush':
        # BUG FIX: asynccontextmanager was only imported inside the
        # standalone `if __name__ == '__main__'` block above, so this branch
        # raised NameError whenever main() ran through the module-level guard
        # or an import.  Import it locally here.
        from contextlib import asynccontextmanager

        @asynccontextmanager
        async def rtcpush_lifespan(app: FastAPI):
            """Custom lifespan for rtcpush mode: one push task per session."""
            async def start_push_tasks():
                for k in range(opt.max_session):
                    push_url = opt.push_url
                    if k != 0:
                        # Sessions beyond the first get a numbered stream name.
                        push_url = opt.push_url + str(k)
                    await run_push(push_url, k)

            asyncio.create_task(start_push_tasks())
            yield
            # NOTE(review): `pcs` is not defined in this file's visible scope
            # (aiortc leftover) — confirm before enabling rtcpush mode.
            coros = [pc.close() for pc in pcs]
            await asyncio.gather(*coros)
            pcs.clear()

        global_app.router.lifespan_context = rtcpush_lifespan

    uvicorn.run(
        global_app,
        host=opt.host,
        port=opt.listenport,
        log_level="info"
    )

if __name__ == "__main__":
    main()