"""Define a custom Reasoning and Action agent.

Works with a chat model with tool calling support.
"""

from contextlib import asynccontextmanager
from datetime import UTC, datetime
from typing import Dict, List, Literal, cast

from langchain_core.messages import AIMessage, ToolMessage, HumanMessage
from langgraph.graph import StateGraph
from langgraph.graph.state import CompiledStateGraph
from langgraph.prebuilt import ToolNode, create_react_agent

from react_agent.configuration import Configuration
from react_agent.state import InputState, State
from react_agent.utils import load_chat_model
from langchain_mcp_adapters.client import MultiServerMCPClient, load_mcp_tools
from langchain_core.runnables import RunnableConfig
from langchain_ollama import ChatOllama
from langchain.chat_models import init_chat_model
from langchain.chat_models.base import BaseChatModel
from langgraph.types import Command
import asyncio
from contextlib import AsyncExitStack

# Module-level stack that keeps the MCP sessions (and their stdio
# subprocesses) alive across graph nodes for the lifetime of the process.
# NOTE(review): nothing ever awaits aclose() on it — confirm whether
# teardown at interpreter exit is acceptable.
_exit_stack = AsyncExitStack()

# Instantiate the chat model selected by the configuration.
async def load_chat_model(conf: Configuration) -> BaseChatModel:
    """Instantiate the chat model named in *conf*.

    ``conf.model`` is expected in ``"provider/model-name"`` form, e.g.
    ``"ollama/qwen3"`` or ``"openai/gpt-4o"``.

    Args:
        conf: The agent configuration carrying the model spec.

    Returns:
        BaseChatModel: The instantiated chat model.

    Raises:
        ValueError: If ``conf.model`` is empty, or contains no ``"/"``
            separator (``str.split`` unpacking fails).
    """
    # NOTE(review): this definition shadows the `load_chat_model` imported
    # from react_agent.utils at the top of the file — confirm that import
    # is obsolete and remove it if so.
    if not conf.model:
        # Fail fast with a clear message instead of returning None and
        # letting the caller crash later in bind_tools().
        raise ValueError("Configuration.model is not set; expected 'provider/model'.")
    provider, model_name = conf.model.split("/", maxsplit=1)
    if provider == "ollama":
        # num_gpu is hard-coded for a specific local GPU setup —
        # TODO: move into Configuration.
        return ChatOllama(model=model_name, num_gpu=38)
    return init_chat_model(model_name, model_provider=provider)

async def load_mcp_server(exit_stack: AsyncExitStack, conf: Configuration, name: str):
    """Open a session to every configured MCP server and collect their tools.

    Each session is entered on *exit_stack*, so it stays alive until the
    caller closes that stack.

    Args:
        exit_stack: Async stack that owns the session lifetimes.
        conf: Configuration whose ``mcp_servers`` mapping lists the servers.
        name: Name to give the resulting ToolNode.

    Returns:
        ToolNode: A node exposing every tool from every configured server.
    """
    client = MultiServerMCPClient(conf.mcp_servers)
    collected = []
    for server_name in conf.mcp_servers:
        session = await exit_stack.enter_async_context(client.session(server_name))
        collected.extend(await load_mcp_tools(session))
    return ToolNode(collected, name=name)

async def init_state(state: State, config: RunnableConfig):
    """Graph entry node: build the LLM and the MCP tool node for this run.

    Args:
        state: The current (initial) conversation state; unused here.
        config: Runnable config carrying the Configuration values.

    Returns:
        dict: State patch with the tool-bound chat model under ``"llm"``
        and the ToolNode under ``"tools"``.
    """
    # Fix: the original declared `global _exit_stack` (unnecessary — the name
    # is only read, never assigned) and rebound the `config` parameter with
    # the parsed Configuration, shadowing it.
    configuration = Configuration.from_runnable_config(config)
    model = await load_chat_model(configuration)
    # Sessions are opened on the module-level exit stack so the MCP
    # subprocesses outlive this node.
    tools = await load_mcp_server(_exit_stack, configuration, "__mcp_tools__")
    # Bind every discovered MCP tool so the model can emit tool calls.
    model = model.bind_tools(list(tools.tools_by_name.values()))
    return {
        "llm": model,
        "tools": tools,
    }


async def call_model(state: State, config: RunnableConfig) -> Dict[str, List[AIMessage]]:
    """Call the LLM powering our "agent".

    Prepares the prompt (system message + history with stale tool results
    truncated), invokes the tool-bound model from state, and handles the
    last-step case.

    Args:
        state (State): The current state of the conversation.
        config (RunnableConfig): Configuration for the model run.

    Returns:
        dict: A dictionary containing the model's response message.
    """
    configuration = Configuration.from_runnable_config(config)
    # Model was built and tool-bound by init_state.
    model = state.llm

    # Format the system prompt. Customize this to change the agent's behavior.
    system_message = configuration.system_prompt.format(
        system_time=datetime.now(tz=UTC).isoformat()
    )

    # Shrink the context: blank out tool results from earlier turns, keeping
    # every result of the most recent tool batch. (Fix: the original kept
    # only the single most recent ToolMessage, which clobbered sibling
    # results when the model issued parallel tool calls.)
    # NOTE: this mutates state.messages in place, so the truncation persists
    # across turns — presumably intentional to keep the context small.
    in_latest_batch = True
    for message in reversed(state.messages):
        if isinstance(message, ToolMessage):
            if not in_latest_batch:
                message.content = "success"
        else:
            # Once we pass a non-tool message, any earlier tool results
            # belong to older turns.
            in_latest_batch = False

    # Build the prompt in a dedicated variable (the original reused the
    # loop variable `msg`, shadowing it).
    prompt = [{"role": "system", "content": system_message}, *state.messages]
    # Get the model's response
    response = cast(AIMessage, await model.ainvoke(prompt))

    # Handle the case when it's the last step and the model still wants to use a tool
    if state.is_last_step and response.tool_calls:
        return {
            "messages": [
                AIMessage(
                    id=response.id,
                    content="Sorry, I could not find an answer to your question in the specified number of steps.",
                )
            ]
        }

    # Return the model's response as a list to be added to existing messages
    return {"messages": [response]}

async def call_tools(state: State, config: RunnableConfig) -> dict[str, list[ToolMessage]]:
    """Execute the pending tool calls via the ToolNode stored in state."""
    tool_node = state.tools
    return await tool_node.ainvoke(state, config)

def route_model_output(state: State) -> Literal["__end__", "call_tools"]:
    """Pick the node that runs after ``call_model``.

    Routes to ``"call_tools"`` when the latest AI message requests tool
    calls, otherwise ends the graph.

    Args:
        state (State): The current state of the conversation.

    Returns:
        str: ``"call_tools"`` or ``"__end__"``.

    Raises:
        ValueError: If the last message is not an AIMessage.
    """
    last_message = state.messages[-1]
    # Guard: the routing contract only makes sense on a model response.
    if not isinstance(last_message, AIMessage):
        raise ValueError(
            f"Expected AIMessage in output edges, but got {type(last_message).__name__}"
        )
    # Tool calls pending → execute them; otherwise we are done.
    return "call_tools" if last_message.tool_calls else "__end__"


async def build_graph() -> CompiledStateGraph:
    """Assemble and compile the ReAct agent graph.

    Flow: ``__start__ -> init_state -> call_model``, then either loop
    through ``call_tools`` back to ``call_model`` or finish at ``__end__``.
    """
    builder = StateGraph(State, input=InputState, config_schema=Configuration)

    # Register the nodes; their names are inferred from the function names.
    for node in (init_state, call_model, call_tools):
        builder.add_node(node)

    # Initialize state first, then hand the conversation to the model.
    builder.add_edge("__start__", "init_state")
    builder.add_edge("init_state", "call_model")
    # After the model speaks, route_model_output decides: tools or finish.
    builder.add_conditional_edges("call_model", route_model_output)
    # Tool results always flow back into the model, forming the ReAct loop.
    builder.add_edge("call_tools", "call_model")

    return builder.compile(name="ReAct Agent")

async def main() -> None:
    """Run the graph on a single browsing task and stream tokens to stdout.

    Configures a Playwright MCP server over stdio, builds the agent graph,
    streams the model's output, and tears down the MCP sessions on exit.
    """
    mcp_servers = {
        "playwright": {
            "command": "npx",
            "args": ["-y", "@playwright/mcp@latest", "--config", r"D:\Application\ai\playwright.json"],
            "transport": "stdio"
        }
    }

    message = HumanMessage(content="打开百度，查询今日黄金价格")

    graph = await build_graph()
    conf = {
        "configurable": {
            "mcp_servers": mcp_servers
        }
    }

    try:
        # stream_mode="messages" yields (chunk, metadata) pairs per token.
        async for message_chunk, _metadata in graph.astream(
            {"messages": message}, stream_mode="messages", config=conf
        ):
            if message_chunk.content:
                print(message_chunk.content, end="|", flush=True)
    finally:
        # Fix: init_state opens MCP sessions on the module-level exit stack,
        # but nothing closed it — the Playwright stdio subprocess leaked.
        # Close the stack so sessions and subprocesses are torn down cleanly.
        await _exit_stack.aclose()

# Script entry point: run the async main() under a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
