#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@File    ：agent.py
@Author  ：平
@Date    ：2025/9/28 13:15 
"""
import json
import logging
from typing import Any, Optional, cast

from fastapi import APIRouter
from langchain_core.messages import HumanMessage, ToolMessage, AIMessageChunk, BaseMessage, AIMessage
from langgraph.graph.state import CompiledStateGraph
from starlette.responses import StreamingResponse

from app.graph.graph import graph
from app.repositry.chat_message_repo import query_messages_by_mix, save_message, query_conversation_by_user_id
from app.schema.agent import AgentStreamRequest
from app.schema.base_response import BaseResponse
from app.util.json_util import sanitize_args

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# All routes below are mounted under the /agent prefix.
router = APIRouter(prefix="/agent")


@router.post("/stream")
async def stream(request: AgentStreamRequest):
    """SSE endpoint: replay recent thread history, persist the user's turn, and stream graph output.

    Args:
        request: Payload carrying userId, threadId, content, and optional title.

    Returns:
        A text/event-stream StreamingResponse fed by _astream_graph_events.
    """
    # Load recent history (repo appears to return newest-first; iterate reversed
    # to rebuild chronological order). Guard against a None return.
    history = query_messages_by_mix(request.threadId) or []
    rebuilt = [
        AIMessage(content=record.content, name=record.agent)
        if record.role == "assistant"
        else HumanMessage(content=record.content)
        for record in reversed(history)
    ]

    # Persist the incoming user message; fall back to a content prefix as title.
    saved = save_message(user_id=request.userId,
                         title=request.title or request.content[:20],
                         thread_id=request.threadId,
                         content=request.content,
                         role="human")
    rebuilt.append(HumanMessage(saved.content))

    return StreamingResponse(
        _astream_graph_events(graph, {"messages": rebuilt}, request.userId, request.threadId, title=saved.title),
        media_type="text/event-stream",
    )


async def _astream_graph_events(graph_instance: CompiledStateGraph, state: dict, user_id: int, thread_id: str,
                                title: Optional[str] = None):
    """Run the graph in streaming mode and yield SSE frames per message chunk.

    Persists each completed assistant reply (detected via finish_reason) through
    save_message before continuing the stream.

    Args:
        graph_instance: Compiled LangGraph to execute.
        state: Initial graph state; expects a "messages" list.
        user_id: Conversation owner, forwarded to save_message.
        thread_id: Conversation / checkpoint identifier.
        title: Conversation title stored alongside the assistant reply.

    Yields:
        SSE-formatted strings produced by _make_event.
    """
    final_content = ""
    agent_name = "unknown"
    async for agent, event_data in graph_instance.astream(
            state,
            # FIX: LangGraph checkpointers read the thread id from the
            # "configurable" mapping; a bare top-level {"thread_id": ...} key
            # is ignored, so persistence/resume would silently not work.
            config={"configurable": {"thread_id": thread_id}},
            stream_mode="messages",
            subgraphs=True
    ):
        # Lazy %-args: skip string building when INFO logging is disabled.
        logger.info("数据：%s", event_data)

        message_chunk, message_metadata = cast(
            tuple[BaseMessage, dict[str, Any]], event_data
        )

        # With subgraphs=True the namespace tuple looks like ("node:run_id", ...);
        # strip the run id, else fall back to the node recorded in metadata.
        if agent and len(agent) > 0:
            agent_name = agent[0].split(":")[0] if ":" in agent[0] else agent[0]
        else:
            agent_name = message_metadata.get("langgraph_node", "unknown")

        event_stream_message = {
            "thread_id": thread_id,
            "agent": agent_name,
            "content": message_chunk.content,
        }

        if isinstance(message_chunk, ToolMessage):
            # Tool Message - return the result of the tool call.
            event_stream_message["tool_call_id"] = message_chunk.tool_call_id
            event_stream_message['role'] = 'tool'
            yield _make_event("tool_call_result", event_stream_message)
        elif isinstance(message_chunk, AIMessageChunk):
            event_stream_message['role'] = 'assistant'
            # NOTE(review): assumes chunk content is str; list-of-parts content
            # (multimodal models) would raise TypeError here — TODO confirm.
            final_content += message_chunk.content

            finish_reason = message_chunk.response_metadata.get("finish_reason")
            if finish_reason:
                event_stream_message["finish_reason"] = finish_reason
                # Reply finished: persist the accumulated assistant message.
                save_message(user_id, thread_id, title=title, agent=agent_name, role='assistant', content=final_content,
                             finish_reason=finish_reason)
                # Reset the accumulator for the next assistant turn.
                final_content = ""
            if message_chunk.tool_calls:
                # AI Message - complete tool call(s).
                event_stream_message["tool_calls"] = message_chunk.tool_calls
                event_stream_message["tool_call_chunks"] = _process_tool_call_chunks(
                    message_chunk.tool_call_chunks
                )
                yield _make_event("tool_calls", event_stream_message)
            elif message_chunk.tool_call_chunks:
                # AI Message - partial tool-call argument chunks.
                event_stream_message["tool_call_chunks"] = _process_tool_call_chunks(
                    message_chunk.tool_call_chunks
                )
                yield _make_event("tool_call_chunks", event_stream_message)
            else:
                # AI Message - raw token chunk.
                yield _make_event("message_chunk", event_stream_message)


def _process_tool_call_chunks(tool_call_chunks):
    """Process tool call chunks and sanitize arguments."""
    chunks = []
    for chunk in tool_call_chunks:
        chunks.append(
            {
                "name": chunk.get("name", ""),
                "args": sanitize_args(chunk.get("args", "")),
                "id": chunk.get("id", ""),
                "index": chunk.get("index", 0),
                "type": chunk.get("type", ""),
            }
        )
    return chunks


def _make_event(event_type: str, data: dict[str, any]):
    if data.get("content") == "":
        data.pop("content")
    # Ensure JSON serialization with proper encoding
    try:
        json_data = json.dumps(data, ensure_ascii=False)

        return f"event: {event_type}\ndata: {json_data}\n\n"
    except (TypeError, ValueError) as e:
        logger.error(f"Error serializing event data: {e}")
        # Return a safe error event
        error_data = json.dumps({"error": "Serialization failed"}, ensure_ascii=False)
        return f"event: error\ndata: {error_data}\n\n"


@router.get("/conversation/list/{user_id}/{days}", response_model=BaseResponse)
async def conversation_list(user_id: int, days: int = 7):
    """Return the user's conversations from the last *days* days (default 7)."""
    return BaseResponse(data=query_conversation_by_user_id(user_id, days))


@router.get("/message/list/{thread_id}", response_model=BaseResponse)
async def message_list(thread_id: str):
    """Return every message in a thread as dicts.

    The second argument -1 presumably means "no limit" in the repo — TODO confirm.
    """
    # FIX: guard a None return the same way the /stream endpoint does
    # (`query_messages_by_mix(...) or []`); iterating None raised TypeError.
    rows = query_messages_by_mix(thread_id, -1) or []
    return BaseResponse(data=[row.to_dict() for row in rows])
