"""
聊天路由模块
提供聊天相关的API接口，包括普通聊天和流式聊天
简化了业务逻辑，将复杂处理移至服务层
"""

from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import StreamingResponse
from typing import List, Optional, Dict, Any
import uuid
import logging
import json
from sqlalchemy import text

# 导入服务层
from ..services.chat_service import chat_service, ChatMessage, ChatRequest, ChatResponse
from ..services.jwt_service import jwt_required, send_token_request_background
from ..services.character_service import load_character_profile
from ..services.memory_service import MemoryService
from ..utils.character_utils import build_system_prompt
from ..utils.performance_utils import (
    format_messages_for_api, should_include_emotion
)
from ..core.rate_limiter import get_client_ip, check_rate_limit
from ..core.database import get_db_session

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/chat", tags=["chat"])

# CORS preflight request handling
@router.options("/")
async def options_handler():
    """Handle CORS preflight requests with an empty JSON body."""
    return {}

# Shared in-process conversation memory used by all endpoints in this router
memory_service = MemoryService()


async def create_sse_event(event_type: str, data: Dict[str, Any]) -> str:
    """
    Build an SSE event string, keeping the legacy wire format exactly.

    The "message" type is sent as a default SSE event (no ``event:`` line,
    matching the original implementation); every other type — including
    "audio" and "done", whose branches were previously duplicated — is
    prefixed with an explicit ``event: <type>`` line.

    Args:
        event_type: SSE event type (e.g. "message", "audio", "done").
        data: JSON-serializable event payload.

    Returns:
        str: SSE-formatted event string, terminated by a blank line.
    """
    # Serialize once; ensure_ascii=False keeps non-ASCII text readable on the wire.
    payload = json.dumps(data, ensure_ascii=False)
    if event_type == "message":
        # Legacy format: "message" events carry no explicit event line.
        return f"data: {payload}\n\n"
    # All named events ("audio", "done", errors, ...) share one shape.
    return f"event: {event_type}\ndata: {payload}\n\n"


def convert_conversation_data_to_messages(conversation_data: Dict) -> List[Dict[str, str]]:
    """
    Convert a stored ``conversation_data`` payload into chat-API messages.

    Only entries of type "text" or "audio" with non-empty content are kept.
    An entry whose sender is "user" maps to role "user"; any other sender
    maps to role "assistant".

    Args:
        conversation_data: Value of the ``conversation_data`` database column.

    Returns:
        List[Dict[str, str]]: Messages as ``{"role", "content"}`` dicts,
        empty when the payload is missing or has no "messages" key.
    """
    if not conversation_data or "messages" not in conversation_data:
        return []

    return [
        {
            "role": "user" if entry.get("sender") == "user" else "assistant",
            "content": entry.get("content", ""),
        }
        for entry in conversation_data["messages"]
        if entry.get("type") in ("text", "audio") and entry.get("content", "")
    ]


async def get_conversation_history(user_uuid: str, ai_person_id: int) -> Optional[Dict]:
    """
    Load the most recently updated conversation history from the database.

    NOTE(review): the session/query here is synchronous SQLAlchemy, so this
    coroutine blocks the event loop for the duration of the query — confirm
    whether this should be moved to a threadpool executor.

    Args:
        user_uuid: User UUID.
        ai_person_id: AI character ID.

    Returns:
        Optional[Dict]: Parsed ``conversation_data`` of the newest matching
        row, or None when arguments are missing, no row exists, or the
        lookup fails (failures are logged, not raised).
    """
    if not user_uuid or not ai_person_id:
        return None
    
    try:
        with get_db_session() as session:
            # Fetch only the newest conversation for this (user, character) pair.
            query = text("""
                SELECT conversation_data 
                FROM user_ai_conversations 
                WHERE user_uuid = :user_uuid AND ai_person_id = :ai_person_id
                ORDER BY updated_at DESC
                LIMIT 1
            """)
            
            result = session.execute(
                query, 
                {"user_uuid": user_uuid, "ai_person_id": ai_person_id}
            ).fetchone()
            
            if result and result[0]:
                # The column may come back as a JSON string or already decoded
                # by the driver; handle both representations.
                return json.loads(result[0]) if isinstance(result[0], str) else result[0]
    
    except Exception as e:
        # History is optional (best-effort): log the failure and return None.
        logger.error(f"获取对话历史失败: {e}", exc_info=True)
    
    return None


async def stream_chat_response(
    messages: List[Dict[str, str]],
    session_id: str,
    generate_voice: bool = False,
    voice_settings: Optional[Dict] = None,
    system_prompt: Optional[str] = None,
    include_emotion: bool = False
):
    """
    Yield an SSE event stream for a chat request.

    Delegates processing to ``chat_service.process_stream_chat`` and turns
    each produced chunk into an SSE-formatted string. On any failure a
    single "error" event is yielded instead of raising to the caller.

    Args:
        messages: Conversation messages in API format.
        session_id: Session identifier.
        generate_voice: Whether to synthesize voice output.
        voice_settings: Voice synthesis settings.
        system_prompt: System prompt text.
        include_emotion: Whether to include emotion analysis.

    Yields:
        str: SSE-formatted event strings.
    """
    try:
        stream = chat_service.process_stream_chat(
            messages=messages,
            session_id=session_id,
            generate_voice=generate_voice,
            voice_settings=voice_settings,
            system_prompt=system_prompt,
            include_emotion=include_emotion,
            memory_service=memory_service
        )
        async for event in stream:
            yield await create_sse_event(event["type"], event["data"])
    except Exception as e:
        logger.error(f"流式聊天响应失败: {e}", exc_info=True)
        yield await create_sse_event("error", {"error": str(e)})


def _extract_voice_settings(character_profile: Optional[Dict]) -> Optional[Dict]:
    """Return voice settings from a character profile, handling the legacy 'minmax' nesting."""
    if not character_profile or "voice_settings" not in character_profile:
        return None
    voice_settings = character_profile["voice_settings"]
    # Legacy profiles nest the actual settings under a "minmax" key.
    if isinstance(voice_settings, dict) and "minmax" in voice_settings:
        return voice_settings.get("minmax", {})
    return voice_settings


async def _resolve_messages(request: ChatRequest, chat_id, session_id: str) -> List[Dict[str, str]]:
    """Merge the request's messages with persisted (DB) or in-memory session history."""
    formatted_messages = format_messages_for_api(request.messages)

    if request.uuid and chat_id:
        # DB-backed history: replay the stored conversation, then append the
        # latest user message from this request.
        conversation_data = await get_conversation_history(request.uuid, chat_id)
        if conversation_data:
            db_messages = convert_conversation_data_to_messages(conversation_data)
            if db_messages:
                if formatted_messages and formatted_messages[-1]["role"] == "user":
                    db_messages.append(formatted_messages[-1])
                formatted_messages = db_messages
    elif session_id:
        # In-memory history: record the new user message first, then use the
        # full session history (which now includes it).
        if formatted_messages and formatted_messages[-1]["role"] == "user":
            memory_service.add_message(session_id, "user", formatted_messages[-1]["content"])
        history_messages = memory_service.get_history(session_id, limit=20)
        if history_messages:
            formatted_messages = history_messages

    return formatted_messages


@router.post("/", dependencies=[Depends(jwt_required)])
async def chat(
    request: ChatRequest,
    req: Request
) -> ChatResponse:
    """
    Chat endpoint: handles a user chat request, with optional voice generation
    and streaming (SSE) output.

    The token reservation (``request.reserve_id``) is confirmed exactly once on
    success and cancelled exactly once on failure. (The previous version sent a
    duplicate cancel — once in the branch-level handler and again in the outer
    handler — and converted rate-limit HTTPExceptions into generic 500s.)

    Args:
        request: Chat request payload.
        req: FastAPI request object (used for client-IP rate limiting).

    Returns:
        ChatResponse: Chat response (format identical to the original version),
        or a StreamingResponse when ``request.stream`` is set.

    Raises:
        HTTPException: Propagated from the rate limiter, or 500 on internal failure.
    """
    try:
        # Rate-limit check (raises HTTPException when the quota is exceeded).
        client_ip = get_client_ip(req)
        check_rate_limit(client_ip)

        # Generate a session ID when the client did not supply one.
        session_id = request.session_id or str(uuid.uuid4())

        # Accept both naming styles for the chat ID.
        chat_id = request.chatId or request.chat_id

        # Load the character profile and derive the system prompt from it.
        character_profile = None
        if chat_id:
            character_profile = await load_character_profile(chat_id, request.language)
        system_prompt = build_system_prompt(character_profile) if character_profile else None

        voice_settings = _extract_voice_settings(character_profile)
        include_emotion = should_include_emotion(request.generate_voice, request.include_emotion)

        formatted_messages = await _resolve_messages(request, chat_id, session_id)

        if request.stream:
            response = StreamingResponse(
                stream_chat_response(
                    formatted_messages,
                    session_id,
                    request.generate_voice or False,
                    voice_settings,
                    system_prompt,
                    include_emotion
                ),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "Access-Control-Allow-Origin": "*",
                    "Access-Control-Allow-Headers": "*",
                }
            )
            # Confirm the token reservation once the stream is handed off.
            send_token_request_background(request.reserve_id, True)
            return response

        # Non-streaming path: delegate to the chat service.
        result = await chat_service.process_chat(
            messages=formatted_messages,
            session_id=session_id,
            generate_voice=request.generate_voice or False,
            voice_settings=voice_settings,
            system_prompt=system_prompt,
            include_emotion=include_emotion
        )

        # Persist the assistant reply into the in-memory session history.
        if result.get("text"):
            memory_service.add_message(session_id, "assistant", result["text"])

        # Chat succeeded: confirm the token reservation.
        send_token_request_background(request.reserve_id, True)

        # Response format is identical to the original version.
        return result

    except HTTPException:
        # Let deliberate HTTP errors (e.g. 429 from the rate limiter) keep
        # their status code; still release the token reservation.
        send_token_request_background(request.reserve_id, False)
        raise
    except Exception as e:
        logger.error(f"聊天处理失败: {e}", exc_info=True)
        # Cancel the token reservation exactly once on failure.
        send_token_request_background(request.reserve_id, False)
        raise HTTPException(status_code=500, detail="内部服务器错误")


@router.get("/{session_id}/history", dependencies=[Depends(jwt_required)])
async def get_chat_history(session_id: str, req: Request):
    """
    Fetch the in-memory chat history for a session.

    Args:
        session_id: Session ID.
        req: FastAPI request object (used for client-IP rate limiting).

    Returns:
        Dict: The session ID, its history list, and the message count.

    Raises:
        HTTPException: Propagated from the rate limiter (e.g. 429), or 500
        when history retrieval fails.
    """
    try:
        # Rate-limit check (raises HTTPException when the quota is exceeded).
        client_ip = get_client_ip(req)
        check_rate_limit(client_ip)

        history = memory_service.get_history(session_id)

        return {
            "session_id": session_id,
            "history": history,
            "count": len(history)
        }

    except HTTPException:
        # Fix: previously a rate-limit HTTPException was swallowed here and
        # re-raised as a generic 500; let it keep its own status code.
        raise
    except Exception as e:
        logger.error(f"获取聊天历史失败: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="获取历史记录失败")


@router.delete("/{session_id}", dependencies=[Depends(jwt_required)])
async def clear_chat_history(session_id: str, req: Request):
    """
    Clear the in-memory chat history for a session.

    Args:
        session_id: Session ID.
        req: FastAPI request object (used for client-IP rate limiting).

    Returns:
        Dict: Operation result with the session ID and a success status.

    Raises:
        HTTPException: Propagated from the rate limiter (e.g. 429), or 500
        when clearing fails.
    """
    try:
        # Rate-limit check (raises HTTPException when the quota is exceeded).
        client_ip = get_client_ip(req)
        check_rate_limit(client_ip)

        memory_service.clear_history(session_id)

        return {
            "session_id": session_id,
            "message": "聊天历史已清空",
            "status": "success"
        }

    except HTTPException:
        # Fix: previously a rate-limit HTTPException was swallowed here and
        # re-raised as a generic 500; let it keep its own status code.
        raise
    except Exception as e:
        logger.error(f"清空聊天历史失败: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="清空历史记录失败")


@router.get("/test-sse")
async def test_sse():
    """SSE connectivity test endpoint: streams five test events, then a completion event."""
    # Fix: the asyncio import previously sat inside the per-event loop body,
    # re-executing on every iteration; hoist it out of the loop.
    import asyncio

    async def event_generator():
        """Generate the test SSE event sequence."""
        # Session ID used only for this test stream.
        test_session_id = str(uuid.uuid4())

        for i in range(5):
            yield await create_sse_event("test", {
                "message": f"测试消息 {i + 1}",
                "session_id": test_session_id,
                # NOTE(review): this is a random UUID, not an actual timestamp —
                # confirm whether any client depends on the field's value.
                "timestamp": str(uuid.uuid4())
            })

            # Simulated latency between events.
            await asyncio.sleep(1)

        # Final completion event.
        yield await create_sse_event("complete", {
            "message": "测试完成",
            "session_id": test_session_id
        })

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*",
        }
    )