import asyncio
import re
import base64
import logging
from datetime import datetime
from fastapi import WebSocket, WebSocketDisconnect
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select

from ..database import AsyncSessionLocal
from ..models import Character, Message
from ..services.llm_service import LLMService
from ..services.stt_service import stt_service
from ..services.tts_service import tts_service
from ..services.storage_service import storage_service
from ..services.rag_service import rag_service
from ..utils.query_utils import query_utils

logger = logging.getLogger(__name__)


class ConnectionManager:
    """Global registry of live WebSocket connections, keyed by session id (singleton, unchanged)."""

    def __init__(self):
        # Maps session_id -> accepted WebSocket.
        self.active_connections: dict = {}

    async def connect(self, websocket: WebSocket, session_id: str):
        """Accept the handshake and register the socket under its session id."""
        await websocket.accept()
        self.active_connections[session_id] = websocket

    def disconnect(self, session_id: str):
        """Forget the session; a no-op when it was never registered."""
        self.active_connections.pop(session_id, None)

    async def send_message(self, message: dict, session_id: str):
        """Send a JSON payload to the session's socket; unknown sessions are silently skipped."""
        connection = self.active_connections.get(session_id)
        if connection:
            await connection.send_json(message)


# Module-level singleton shared by every WebSocket handler in this process.
manager = ConnectionManager()


class WebSocketHandler:
    """
    Highly integrated WebSocket handler.

    Merges connection management, message handling and streaming response
    generation. One instance serves exactly one WebSocket connection bound
    to one conversation.
    """
    # Sentence-ending punctuation (CJK and ASCII) used to cut the streaming
    # LLM text into TTS-sized sentences.
    SENTENCE_ENDS = re.compile(r'[。！？.!?]')
    # Sentences shorter than this are not synthesized, which avoids TTS calls
    # for stray punctuation fragments.
    MIN_TEXT_LENGTH = 2

    def __init__(self, websocket: WebSocket, session_id: str, conversation_id: int):
        self.websocket = websocket
        self.session_id = session_id
        self.conversation_id = conversation_id
        self.db: AsyncSession | None = None        # bound inside handle_connection
        self.character: Character | None = None    # loaded by _initialize_context
        self.message_history: list[dict] = []      # rolling chat context fed to the LLM

    async def handle_connection(self):
        """Run the connection lifecycle: accept, load context, pump messages, clean up."""
        await manager.connect(self.websocket, self.session_id)
        logger.info(f"WebSocket连接建立: session={self.session_id}, conversation={self.conversation_id}")
        try:
            async with AsyncSessionLocal() as db:
                self.db = db
                await self._initialize_context()
                await self._message_loop()
        except WebSocketDisconnect:
            logger.info(f"WebSocket断开: session={self.session_id}")
        except Exception as e:
            logger.error(f"WebSocket错误: {e}", exc_info=True)
            await manager.send_message({"error": "服务器错误"}, self.session_id)
        finally:
            manager.disconnect(self.session_id)

    async def _initialize_context(self):
        """Load the conversation's character and recent history, pushing history to the client.

        Raises:
            ValueError: if the conversation or its character does not exist.
        """
        conversation = await query_utils.get_conversation_with_character(self.db, self.conversation_id)
        if not conversation or not conversation.character:
            raise ValueError("对话或角色不存在")
        self.character = conversation.character
        messages = await self._get_recent_messages(limit=10)
        if messages:
            self.message_history = [{"role": msg.role, "content": msg.content} for msg in messages]
            await self._send_history_messages(messages)

    async def _message_loop(self):
        """Receive and dispatch client frames until the socket disconnects."""
        while True:
            data = await self.websocket.receive_json()
            message_type = data.get("type")
            user_content = None
            user_audio_bytes = None  # raw audio payload, uploaded in the background later

            if message_type == "load_history":
                await self._handle_load_history(data)
                continue

            elif message_type == "text":
                user_content = data.get("content")
                await manager.send_message({
                    "type": "transcription", "content": user_content, "is_corrected": True
                }, self.session_id)

            elif message_type == "audio":
                encoded = data.get("data")
                if not encoded:
                    # Malformed frame: an "audio" message without a payload.
                    # Previously this crashed in b64decode(None).
                    continue
                user_audio_bytes = base64.b64decode(encoded)
                # STT is a required prerequisite for the LLM prompt, so this
                # await must block the loop.
                user_content = await stt_service.transcribe_audio(user_audio_bytes)
                await manager.send_message({
                    "type": "transcription", "content": user_content, "is_corrected": True
                }, self.session_id)

            if user_content:
                # Pass the raw audio bytes so the upload can run in the background.
                await self._generate_ai_response(user_content, user_audio_bytes=user_audio_bytes)

    async def _generate_ai_response(self, user_content: str, user_audio_bytes: bytes | None = None):
        """Generate and stream the AI reply for one user turn.

        Uploading the user's audio runs as a background task so LLM generation
        is not blocked by it.

        Args:
            user_content: transcribed/typed user text.
            user_audio_bytes: raw audio of the user's utterance, if the turn
                came in as audio; uploaded concurrently with generation.
        """
        # === Step 1: preparation (background upload & RAG) ===
        user_audio_upload_task = None
        if user_audio_bytes:
            user_audio_upload_task = asyncio.create_task(
                storage_service.upload_audio(user_audio_bytes),
                name=f"upload_audio_{self.session_id}"
            )

        rag_context = ""
        if self.character.use_knowledge_base:
            chunks = await rag_service.search_knowledge(
                self.db, self.character.id, user_content, k=self.character.knowledge_search_k
            )
            if chunks:
                rag_context = await rag_service.build_context_prompt(chunks)
        enhanced_prompt = self.character.prompt_template
        if rag_context:
            enhanced_prompt += f"\n\n相关知识:\n{rag_context}"

        # === Step 2: local streaming state ===
        ai_full_content = ""
        full_audio_chunks = []
        text_buffer = ""

        # === Step 3: stream LLM text, cutting complete sentences for TTS ===
        # LLM generation starts immediately; it is not blocked by the upload above.
        self.message_history.append({"role": "user", "content": user_content})
        llm_stream = LLMService.generate_response(
            messages=self.message_history, character_prompt=enhanced_prompt
        )
        async for chunk in llm_stream:
            ai_full_content += chunk
            text_buffer += chunk
            await manager.send_message({"type": "text_stream", "content": chunk}, self.session_id)

            while match := self.SENTENCE_ENDS.search(text_buffer):
                sentence = text_buffer[:match.end()].strip()
                text_buffer = text_buffer[match.end():].lstrip()
                if len(sentence) >= self.MIN_TEXT_LENGTH:
                    await self._process_sentence_tts(sentence, full_audio_chunks)

        # === Step 4: flush any trailing text left in the buffer ===
        if remaining_text := text_buffer.strip():
            if len(remaining_text) >= self.MIN_TEXT_LENGTH:
                await self._process_sentence_tts(remaining_text, full_audio_chunks)

        # === Step 5: finish uploads and persist the turn ===
        # The AI audio upload runs concurrently with the user audio upload.
        ai_audio_upload_task = None
        if full_audio_chunks:
            full_audio = b"".join(full_audio_chunks)
            ai_audio_upload_task = asyncio.create_task(
                storage_service.upload_audio(full_audio),
                name=f"upload_ai_audio_{self.session_id}"
            )

        # BUGFIX: a failed user-audio upload must not abort the whole turn.
        # Previously the exception propagated here, so the messages below were
        # never committed; now we log it and persist with audio_url=None,
        # mirroring the AI-audio handling.
        user_audio_url = None
        if user_audio_upload_task:
            try:
                user_audio_url = await user_audio_upload_task
            except Exception as e:
                logger.error(f"用户音频上传失败: {e}")

        ai_audio_url = None
        if ai_audio_upload_task:
            try:
                ai_audio_url = await ai_audio_upload_task
                await manager.send_message({"type": "audio_complete", "url": ai_audio_url}, self.session_id)
            except Exception as e:
                logger.error(f"AI音频上传失败: {e}")

        # Persist both sides of the turn in a single transaction.
        user_message = Message(
            conversation_id=self.conversation_id, role="user", content=user_content, audio_url=user_audio_url
        )
        ai_message = Message(
            conversation_id=self.conversation_id, role="assistant", content=ai_full_content, audio_url=ai_audio_url
        )
        self.db.add_all([user_message, ai_message])
        await self.db.commit()

        self.message_history.append({"role": "assistant", "content": ai_full_content})

    async def _process_sentence_tts(self, sentence: str, audio_chunks_list: list):
        """Synthesize one sentence and stream its audio chunks to the client.

        TTS failures are logged and swallowed so one bad sentence does not kill
        the rest of the response stream. Chunks are also appended to
        `audio_chunks_list` so the caller can assemble the full audio.
        """
        try:
            tts_stream = tts_service.synthesize_stream(text=sentence, voice_id=self.character.voice_id)
            async for audio_chunk in tts_stream:
                audio_chunks_list.append(audio_chunk)
                await manager.send_message({
                    "type": "audio_stream",
                    "data": base64.b64encode(audio_chunk).decode('utf-8')
                }, self.session_id)
        except Exception as e:
            logger.error(f"TTS合成失败 for sentence '{sentence}': {e}")

    # === Helpers ===
    async def _handle_load_history(self, data: dict):
        """Handle a "load_history" frame: send messages older than the given cursor."""
        before_str = data.get("before_created_at")
        if not before_str:
            return
        try:
            # Accept both "Z"-suffixed and explicit-offset ISO-8601 timestamps.
            before_dt = datetime.fromisoformat(before_str.replace("Z", "+00:00"))
            older_messages = await self._get_recent_messages(before_dt=before_dt, limit=20)
            if older_messages:
                await self._send_history_messages(older_messages)
        except Exception as e:
            logger.error(f"加载历史消息失败: {e}")

    async def _get_recent_messages(self, limit: int, before_dt: datetime | None = None) -> list[Message]:
        """Return up to `limit` messages of this conversation in chronological order.

        Args:
            limit: maximum number of messages to return.
            before_dt: when given, only messages created strictly before it.
        """
        query = select(Message).where(Message.conversation_id == self.conversation_id)
        if before_dt:
            query = query.where(Message.created_at < before_dt)
        query = query.order_by(Message.created_at.desc()).limit(limit)
        result = await self.db.execute(query)
        messages = result.scalars().all()
        # The query fetched newest-first; reverse back to chronological order.
        return list(reversed(messages))

    async def _send_history_messages(self, messages: list[Message]):
        """Push a batch of persisted messages to the client as one "history" frame."""
        history_payload = [
            {"role": msg.role, "content": msg.content, "created_at": msg.created_at.isoformat(),
             "audio_url": msg.audio_url}
            for msg in messages
        ]
        await manager.send_message({"type": "history", "messages": history_payload}, self.session_id)


async def handle_websocket(websocket: WebSocket, session_id: str, conversation_id: int):
    """
    Slim WebSocket entry point.

    Builds one integrated handler per client connection and drives it to
    completion.
    """
    await WebSocketHandler(websocket, session_id, conversation_id).handle_connection()