import inspect
import json
import logging
import warnings
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager
from typing import Annotated, Any
from uuid import UUID, uuid4

from fastapi import APIRouter, Depends, FastAPI, HTTPException, status
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from langchain_core._api import LangChainBetaWarning
from langchain_core.messages import AIMessage, AIMessageChunk, AnyMessage, HumanMessage, ToolMessage
from langchain_core.runnables import RunnableConfig
from langfuse import Langfuse
from langfuse.callback import CallbackHandler
from langgraph.types import Command, Interrupt
from langsmith import Client as LangsmithClient

from agents import DEFAULT_AGENT, AgentGraph, get_agent, get_all_agent_info
from core import settings
from memory import initialize_database, initialize_store
from schema import (
    ChatHistory,
    ChatHistoryInput,
    ChatMessage,
    Feedback,
    FeedbackResponse,
    ServiceMetadata,
    StreamInput,
    UserInput,
)
from service.utils import (
    convert_message_content_to_string,
    langchain_to_chat_message,
    remove_tool_calls,
)

# Silence LangChain beta-feature warnings triggered by the imports above.
warnings.filterwarnings("ignore", category=LangChainBetaWarning)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)


# 权限认证
def verify_bearer(
    http_auth: Annotated[
        HTTPAuthorizationCredentials | None,
        Depends(HTTPBearer(description="Please provide AUTH_SECRET api key.", auto_error=False)),
    ],
) -> None:
    """Validate the request's bearer token against the configured AUTH_SECRET.

    When no AUTH_SECRET is configured, authentication is disabled and every
    request passes. Otherwise, a missing or mismatched token is rejected
    with HTTP 401.
    """
    if not settings.AUTH_SECRET:
        # Auth disabled: nothing to verify.
        return
    expected = settings.AUTH_SECRET.get_secret_value()
    supplied = http_auth.credentials if http_auth else None
    if supplied != expected:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)


@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """
    Manage the FastAPI application's lifecycle.

    Initializes the database checkpointer and the store on startup, wires
    them into every registered agent, then yields control to the running
    app; the async context managers handle cleanup on shutdown.
    """
    try:
        # Initialize the checkpointer and the store together.
        # The checkpointer backs short-term memory (conversation history);
        # the store backs long-term memory (cross-conversation knowledge).
        async with initialize_database() as saver, initialize_store() as store:
            # saver: checkpointer for thread-scoped conversation state.
            # store: storage service for persistent, cross-thread knowledge.
            if hasattr(saver, "setup"):
                # Run one-time setup when the checkpointer provides it.
                await saver.setup()

            # NOTE: in-memory stores may have no setup; only persistent
            # backends expose one.
            if hasattr(store, "setup"):
                await store.setup()

            # Attach memory backends to every available agent.
            agents = get_all_agent_info()  # list of AgentInfo (key + description)
            for a in agents:
                # Resolve the agent instance by its key.
                agent = get_agent(a.key)
                # Checkpointer: thread-scoped memory (conversation history).
                agent.checkpointer = saver
                # Store: long-term memory (cross-conversation knowledge).
                agent.store = store
            yield
    except Exception as e:
        logger.error(f"Error during database/store initialization: {e}")
        raise


app = FastAPI(lifespan=lifespan)
# The lifespan function above is the application's lifecycle manager:
# its initialization code runs on startup and its cleanup runs on shutdown.

router = APIRouter(dependencies=[Depends(verify_bearer)])
# verify_bearer runs before every request handled by routes on this router.


@router.get("/info")
async def info() -> ServiceMetadata:
    """Return service metadata: available agents, models, and defaults."""
    # TODO: expose a single default model for use.
    available_models = sorted(settings.AVAILABLE_MODELS)
    return ServiceMetadata(
        agents=get_all_agent_info(),  # every registered agent
        models=available_models,  # sorted model identifiers
        default_agent=DEFAULT_AGENT,  # agent used when none is specified
        default_model=settings.DEFAULT_MODEL,  # default model identifier
    )


async def _handle_input(user_input: UserInput, agent: AgentGraph) -> tuple[dict[str, Any], UUID]:
    """
    Parse user input and handle any required interrupt resumption.

    Args:
        user_input: The incoming request payload.
        agent: The agent graph that will be invoked.

    Returns:
        A tuple of (kwargs for the agent invocation, run_id).

    Raises:
        HTTPException: 422 when agent_config contains reserved keys.
    """
    # Unique run ID used to trace this invocation end to end (and for feedback).
    run_id = uuid4()

    # Thread ID: reuse the caller-supplied one or mint a new UUID.
    thread_id = user_input.thread_id or str(uuid4())

    # User ID: reuse the caller-supplied one or mint a new UUID.
    user_id = user_input.user_id or str(uuid4())

    # Base configurable parameters for the run.
    configurable = {"thread_id": thread_id, "user_id": user_id}

    # TODO: pin a fixed model later so it is not caller-selectable.
    if user_input.model is not None:
        configurable["model"] = user_input.model

    # Attach a Langfuse callback handler when tracing is enabled.
    callbacks = []
    if settings.LANGFUSE_TRACING:
        callbacks.append(CallbackHandler())

    # Merge any extra agent configuration supplied by the caller.
    if user_input.agent_config:
        # Keys owned by the service — callers may not override them.
        reserved_keys = {"thread_id", "user_id", "model"}
        if overlap := reserved_keys & user_input.agent_config.keys():
            raise HTTPException(
                status_code=422,
                detail=f"agent_config contains reserved keys: {overlap}",
            )
        configurable.update(user_input.agent_config)

    # Full run configuration passed to the agent.
    config = RunnableConfig(
        configurable=configurable,
        run_id=run_id,
        callbacks=callbacks,
    )

    # Inspect current agent state to detect interrupted tasks on this thread.
    state = await agent.aget_state(config=config)
    interrupted_tasks = [
        task for task in state.tasks if hasattr(task, "interrupts") and task.interrupts
    ]

    # Named agent_input (not "input") to avoid shadowing the builtin.
    agent_input: Command | dict[str, Any]
    if interrupted_tasks:
        # Resume the interrupted run with the user's message.
        agent_input = Command(resume=user_input.message)
    else:
        # No interruption: send a standard human message.
        agent_input = {"messages": [HumanMessage(content=user_input.message)]}

    kwargs = {
        "input": agent_input,
        "config": config,
    }

    return kwargs, run_id


@router.post("/{agent_id}/invoke")  # invoke a specific agent by ID
@router.post("/invoke")  # invoke the default agent
async def invoke(user_input: UserInput, agent_id: str = DEFAULT_AGENT) -> ChatMessage:
    """
    Invoke an agent with user input to retrieve a final response.

    If agent_id is not provided, the default agent is used. thread_id lets a
    multi-turn conversation be persisted and continued; the run_id attached to
    the returned message is used for feedback. user_id enables conversation
    persistence and continuation across multiple threads.

    Note: only the last message or the interrupt is returned. When the agent
    emits several AIMessages (e.g. background steps in interrupt-agent, or tool
    steps in research-assistant), the intermediate ones are dropped. There is an
    argument for including them; in that case consider returning a list of
    ChatMessages from this API.
    """
    # TODO: make agent_id default to the main agent, not caller-selectable.
    agent: AgentGraph = get_agent(agent_id)
    kwargs, run_id = await _handle_input(user_input, agent)

    try:
        # Invoke the agent and collect the full list of (mode, payload) events.
        response_events: list[tuple[str, Any]] = await agent.ainvoke(
            **kwargs, stream_mode=["updates", "values"]
        )

        # The last event carries the final state.
        response_type, response = response_events[-1]

        if response_type == "values":
            # Normal completion: convert the last message to a ChatMessage.
            output = langchain_to_chat_message(response["messages"][-1])
        elif response_type == "updates" and "__interrupt__" in response:
            # Interrupted run: surface the first interrupt value as an AIMessage.
            output = langchain_to_chat_message(
                AIMessage(content=response["__interrupt__"][0].value)
            )
        else:
            # Unknown response shape.
            raise ValueError(f"Unexpected response type: {response_type}")

        # Attach the run ID so clients can submit feedback for this run.
        output.run_id = str(run_id)
        return output
    except Exception as e:
        # Log with the full traceback; surface a generic 500 to the client.
        logger.exception(f"An exception occurred: {e}")
        raise HTTPException(status_code=500, detail="Unexpected error") from e


async def message_generator(
        user_input: StreamInput, agent_id: str = DEFAULT_AGENT
) -> AsyncGenerator[str, None]:
    """
    Generate a stream of messages from the agent.

    This is the core implementation of the /stream endpoint. Each yielded
    string is a Server-Sent Event, and the stream is always terminated with
    a [DONE] sentinel.
    """
    # 1. Resolve the agent instance.
    agent: AgentGraph = get_agent(agent_id)

    # 2. Parse the input and build the invocation kwargs.
    kwargs, run_id = await _handle_input(user_input, agent)

    try:
        # 3. Start the streaming invocation.
        async for stream_event in agent.astream(
                **kwargs,
                stream_mode=["updates", "messages", "custom"],
                subgraphs=True
        ):
            # Skip anything that is not a tuple event.
            if not isinstance(stream_event, tuple):
                continue

            # 4. Unpack the event structure.
            if len(stream_event) == 3:
                # With subgraphs: (node path, stream mode, event payload).
                _, stream_mode, event = stream_event
            else:
                # Without subgraphs: (stream mode, event payload).
                stream_mode, event = stream_event
            new_messages = []

            # 5. Handle state-update events.
            if stream_mode == "updates":
                for node, updates in event.items():
                    # Interrupts: surface each interrupt value as an AI message.
                    if node == "__interrupt__":
                        interrupt: Interrupt
                        for interrupt in updates:
                            new_messages.append(AIMessage(content=interrupt.value))
                        continue

                    # Pull message updates out of the node's state delta.
                    updates = updates or {}
                    update_messages = updates.get("messages", [])

                    # Special-case supervisor nodes and sub-agents.
                    if "supervisor" in node or "sub-agent" in node:
                        # The only tools that come from the actual agent are the
                        # handoff and handback tools. Guard against an empty
                        # list before inspecting the last message (an empty
                        # update otherwise raised IndexError here).
                        if update_messages and isinstance(update_messages[-1], ToolMessage):
                            if "sub-agent" in node and len(update_messages) > 1:
                                # Sub-agents keep the last two messages.
                                update_messages = update_messages[-2:]
                            else:
                                # Supervisor nodes keep only the last message.
                                update_messages = [update_messages[-1]]
                        else:
                            update_messages = []
                    new_messages.extend(update_messages)

            # 6. Handle custom events.
            if stream_mode == "custom":
                new_messages = [event]

            # 7. Coalesce message fragments into full messages.
            processed_messages = []
            current_message: dict[str, Any] = {}
            for message in new_messages:
                if isinstance(message, tuple):
                    # A fragment: (field name, field value).
                    key, value = message
                    current_message[key] = value
                else:
                    # A complete message object: flush pending fragments first.
                    if current_message:
                        processed_messages.append(_create_ai_message(current_message))
                        current_message = {}
                    processed_messages.append(message)

            # Flush any remaining fragments.
            if current_message:
                processed_messages.append(_create_ai_message(current_message))

            # 8. Emit one SSE event per processed message.
            for message in processed_messages:
                try:
                    chat_message = langchain_to_chat_message(message)
                    chat_message.run_id = str(run_id)
                    # Drop the echo of the user's own message.
                    if chat_message.type == "human" and chat_message.content == user_input.message:
                        continue
                    # Emit an SSE-formatted message event.
                    yield f"data: {json.dumps({'type': 'message', 'content': chat_message.model_dump()})}\n\n"

                except Exception as e:
                    logger.error(f"Error parsing message: {e}")
                    yield f"data: {json.dumps({'type': 'error', 'content': 'Unexpected error'})}\n\n"

            if stream_mode == "messages":
                # Token streaming can be disabled per request.
                if not user_input.stream_tokens:
                    continue

                # Skip messages explicitly tagged to be kept out of the stream.
                msg, metadata = event
                if "skip_stream" in metadata.get("tags", []):
                    continue

                # Only AI message chunks carry streamable tokens.
                if not isinstance(msg, AIMessageChunk):
                    continue

                # Strip tool-call content before emitting tokens.
                content = remove_tool_calls(msg.content)
                if content:
                    # Emit an SSE-formatted token event.
                    yield f"data: {json.dumps({'type': 'token', 'content': convert_message_content_to_string(content)})}\n\n"
    except Exception as e:
        logger.error(f"Error in message generator: {e}")
        yield f"data: {json.dumps({'type': 'error', 'content': 'Internal server error'})}\n\n"
    finally:
        # Always terminate the SSE stream.
        yield "data: [DONE]\n\n"


def _create_ai_message(parts: dict) -> AIMessage:
    """Build a complete AIMessage from a dict of partial message fields.

    Args:
        parts: Message field data, e.g. {'content': 'Hello', 'tool_calls': [...]}.
               Keys not accepted by the AIMessage constructor are dropped.

    Returns:
        A fully constructed AIMessage instance.
    """
    accepted = set(inspect.signature(AIMessage).parameters)
    return AIMessage(**{key: val for key, val in parts.items() if key in accepted})


def _sse_response_example() -> dict[int | str, Any]:
    """Return the OpenAPI response documentation for the SSE stream endpoints."""
    sse_example = (
        "data: {'type': 'token', 'content': 'Hello'}\n\n"
        "data: {'type': 'token', 'content': ' World'}\n\n"
        "data: [DONE]\n\n"
    )
    return {
        status.HTTP_200_OK: {
            "description": "Server Sent Event Response",
            "content": {
                "text/event-stream": {
                    "example": sse_example,
                    "schema": {"type": "string"},
                }
            },
        }
    }


@router.post("/{agent_id}/stream", response_class=StreamingResponse, responses=_sse_response_example(),)
@router.post("/stream", response_class=StreamingResponse, responses=_sse_response_example())
async def stream(user_input: StreamInput, agent_id: str = DEFAULT_AGENT) -> StreamingResponse:
    """
    Stream the agent's response as Server-Sent Events.

    Delegates to message_generator, which yields intermediate messages and
    (optionally) individual tokens in real time.
    """
    generator = message_generator(user_input, agent_id)
    return StreamingResponse(generator, media_type="text/event-stream")


@router.post("/feedback")
async def feedback(feedback: Feedback) -> FeedbackResponse:
    """
    Record user feedback for a run.

    Forwards run_id, key, score and any extra kwargs to the LangSmith
    feedback API via LangsmithClient.
    """
    extra_kwargs = feedback.kwargs or {}
    LangsmithClient().create_feedback(
        run_id=feedback.run_id,
        key=feedback.key,
        score=feedback.score,
        **extra_kwargs,
    )
    return FeedbackResponse()


@router.post("/history")
async def history(input: ChatHistoryInput) -> ChatHistory:
    """
    Get chat history for the given thread, converted to ChatMessages.
    """
    # TODO: make the default agent configurable.
    agent: AgentGraph = get_agent(DEFAULT_AGENT)
    try:
        snapshot = await agent.aget_state(
            config=RunnableConfig(configurable={"thread_id": input.thread_id})
        )
        raw_messages: list[AnyMessage] = snapshot.values["messages"]
        converted: list[ChatMessage] = [langchain_to_chat_message(m) for m in raw_messages]
        return ChatHistory(messages=converted)
    except Exception as e:
        logger.error(f"An exception occurred: {e}")
        raise HTTPException(status_code=500, detail="Unexpected error")



@app.get("/health")
async def health_check():
    """Health check endpoint."""

    report = {"status": "ok"}

    # When Langfuse tracing is enabled, also report its connectivity.
    if settings.LANGFUSE_TRACING:
        try:
            client = Langfuse()
            report["langfuse"] = "connected" if client.auth_check() else "disconnected"
        except Exception as e:
            logger.error(f"Langfuse connection error: {e}")
            report["langfuse"] = "disconnected"

    return report


# Register the authenticated API routes on the application.
app.include_router(router)
