"""Define a custom Reasoning and Action agent.

Works with a chat model with tool calling support.
"""

from datetime import datetime, timezone
from typing import Dict, List, Literal, cast
import os

from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode

from react_agent.configuration import Configuration
from react_agent.state import InputState, State
from react_agent.tools import TOOLS
from react_agent.utils import load_chat_model, image_uri_extract, load_img_db, append_img_db

# Define the function that calls the model


async def museum_ai_agent(
    state: State, config: RunnableConfig
) -> Dict[str, List[AIMessage]]:
    """Call the LLM powering our "agent".

    Builds the system prompt, invokes the tool-bound chat model on the
    conversation so far, and returns the reply to be appended to state.

    Args:
        state (State): The current state of the conversation.
        config (RunnableConfig): Configuration for the model run.

    Returns:
        dict: A dictionary containing the model's response message.
    """
    configuration = Configuration.from_runnable_config(config)

    # Tool-bound chat model; swap the model or extend TOOLS here.
    chat_model = load_chat_model(
        configuration.model,
        configuration.model_max_predict,
        configuration.model_context,
    ).bind_tools(TOOLS)

    # System prompt with the current UTC time interpolated.
    system_message = configuration.text_qa_prompt.format(
        system_time=datetime.now(tz=timezone.utc).isoformat()
    )

    prompt = [{"role": "system", "content": system_message}, *state.messages]
    response = cast(AIMessage, await chat_model.ainvoke(prompt, config))

    # Out of steps but the model still wants a tool: bail out gracefully,
    # reusing the response id so the message replaces it in the stream.
    if state.is_last_step and response.tool_calls:
        fallback = AIMessage(
            id=response.id,
            content="Sorry, I could not find an answer to your question in the specified number of steps.",
        )
        return {"messages": [fallback]}

    # Append the model's reply to the existing messages.
    return {"messages": [response]}


async def image_retrieve(state: State, config: RunnableConfig) -> Dict[str, List[AIMessage]]:
    """Retrieve textual context for an image referenced in the user's query.

    Extracts an image URI from the latest human message, searches the image
    vector store for the most similar images, reads each match's companion
    ``.txt`` description file, and returns the combined text as a ToolMessage.

    Args:
        state (State): The current state of the conversation.
        config (RunnableConfig): Configuration for the run.

    Returns:
        dict: A dictionary with a single ToolMessage carrying the retrieved
            image descriptions.

    Raises:
        ValueError: If the last message is not a HumanMessage, no image URI
            can be extracted from it, or NOMIC_API_KEY is not set.
        FileNotFoundError: If the extracted local image path does not exist.
    """
    last_message = state.messages[-1]
    if not isinstance(last_message, HumanMessage):
        raise ValueError(
            f"Expected HumanMessage in output edges, but got {type(last_message).__name__}"
        )

    # Use a distinct local name so the RunnableConfig parameter is not shadowed
    # (consistent with museum_ai_agent / generate).
    configuration = Configuration.from_runnable_config(config)
    query = last_message.content
    print("User Query:", query)
    img_uri = image_uri_extract(query)
    if img_uri is None:
        raise ValueError(f"No image URI found in the query '{query}'")
    print("Extracted Image URI:", img_uri)
    if img_uri.startswith("./"):
        # Resolve project-relative paths against the configured root.
        # NOTE(review): replace("./", "") strips every "./" occurrence, not
        # just the leading one — assumed acceptable for the URIs seen here.
        img_uri = os.path.join(configuration.museumai_root,
                               img_uri.replace("./", "")).replace("\\", "/")
    if (not img_uri.startswith("http")) and (not os.path.exists(img_uri)):
        raise FileNotFoundError(f"Image '{img_uri}' not found. Please re-input correct path.")

    db_path = configuration.img_db_path
    # Reuse the persisted vector store when it exists and is non-empty.
    if os.path.exists(db_path) and os.path.isdir(db_path) and os.listdir(db_path):
        vector_store = load_img_db(db_path)
        print("Load Image db done.")
    else:
        # Fixed message: this branch builds the *image* db, not the text db.
        print("First time loading image db. This may take a while.")
        img_dir = db_path.replace("dunhuang_img_db", "dunhuang_raw")
        vector_store = append_img_db(db_path, img_dir)

    if not os.environ.get("NOMIC_API_KEY"):
        raise ValueError(
            "NOMIC_API_KEY not found and we need it to embed image. Please set it in the environment variable or in .env file")
    docs = vector_store.similarity_search_by_image(img_uri, k=configuration.img_top_k)
    docs_content = []
    for doc in docs:
        img_path = doc.metadata["path"]
        print("Similar image path:", img_path)
        # Each indexed image has a sibling .txt file with its description.
        txt_path = img_path.replace(".png", ".txt")
        print("Context text path:", txt_path)
        # Map the stored relative path onto the project root.
        # NOTE(review): assumes metadata paths start with "./" — verify indexer.
        txt_path = configuration.museumai_root + txt_path.replace("./", "/").replace("\\", "/")
        with open(txt_path, "r", encoding='utf-8') as f:
            img_content = f.read()
        # Prefix each snippet with its cave number (parent directory) and
        # the file stem, e.g. "61窟 ...".
        cave = txt_path.split("/")[-2]
        stem = os.path.basename(txt_path).replace("_", "").replace(".txt", "")
        docs_content.append(cave + "窟 " + stem + ". " + img_content)
    combined = "图片内容：" + "\n\n".join(docs_content)
    return {
        "messages": [
            ToolMessage(
                content=combined,
                tool_call_id="image_retrieve"
            )
        ]
    }


async def generate(state: State, config: RunnableConfig) -> Dict[str, List[AIMessage]]:
    """Generate a response based on retrieved image content.

    Expects the previous node (image_retrieve) to have appended a ToolMessage
    with the retrieved context; answers using the image-QA system prompt.

    Args:
        state (State): The current state of the conversation.
        config (RunnableConfig): Configuration for the model run.

    Returns:
        dict: A dictionary containing the model's response message.

    Raises:
        ValueError: If the last message is not a ToolMessage.
    """
    last_message = state.messages[-1]
    if not isinstance(last_message, ToolMessage):
        raise ValueError(
            f"Expected ToolMessage in output edges, but got {type(last_message).__name__}"
        )

    configuration = Configuration.from_runnable_config(config)

    # Plain chat model — no tools are needed for the final answer.
    model = load_chat_model(
        configuration.model, configuration.model_max_predict, configuration.model_context)

    # Format the system prompt. Customize this to change the agent's behavior.
    system_message = configuration.image_qa_prompt.format(
        system_time=datetime.now(tz=timezone.utc).isoformat()
    )

    # Get the model's response.
    # Fix: pass the system prompt with an explicit "system" role — a bare
    # string in the message list is coerced to a *human* message by
    # LangChain, silently dropping the system role (museum_ai_agent already
    # uses the dict form; this makes the two nodes consistent).
    response = cast(
        AIMessage,
        await model.ainvoke(
            [{"role": "system", "content": system_message}, *state.messages], config
        ),
    )
    # Return the model's response as a list to be added to existing messages
    return {"messages": [response]}


# Define a new graph
# The graph has four nodes: a text-only ReAct agent loop (museum_ai_agent +
# tools) and an image pipeline (image_retrieve -> generate).

builder = StateGraph(State, input=InputState, config_schema=Configuration)

# Nodes added without an explicit name are registered under the function name.
builder.add_node(image_retrieve)
builder.add_node(generate)
builder.add_node(museum_ai_agent)
builder.add_node("tools", ToolNode(TOOLS))


def route_start_output(state: State) -> Literal["museum_ai_agent", "image_retrieve"]:
    """Determine the next node based on the __start__ state.

    Routes to the image pipeline when the incoming human message contains an
    image query, otherwise to the plain text agent.

    Args:
        state (State): The current state of the conversation.

    Returns:
        str: The name of the next node to call ("museum_ai_agent" or "image_retrieve").
    """
    message = state.messages[-1]
    if not isinstance(message, HumanMessage):
        raise ValueError(
            f"Expected HumanMessage in output edges, but got {type(message).__name__}"
        )
    # Image query present -> retrieve similar images; otherwise plain text QA.
    return "image_retrieve" if image_uri_extract(message.content) else "museum_ai_agent"


# Add a conditional edge to determine the next step after `__start__`
builder.add_conditional_edges(
    "__start__",
    # After __start__ finishes running, the next node(s) are scheduled
    # based on the output from route_start_output
    route_start_output,
)
# Image pipeline is linear: retrieve similar images, then generate the answer.
builder.add_edge("image_retrieve", "generate")
builder.add_edge("generate", "__end__")


def route_agent_output(state: State) -> Literal["__end__", "tools"]:
    """Determine the next node based on the agent's output.

    Checks whether the agent's last message requested any tool calls.

    Args:
        state (State): The current state of the conversation.

    Returns:
        str: The name of the next node to call ("__end__" or "tools").
    """
    message = state.messages[-1]
    if not isinstance(message, AIMessage):
        raise ValueError(
            f"Expected AIMessage in output edges, but got {type(message).__name__}"
        )
    # Tool calls pending -> run them; otherwise the conversation is done.
    return "tools" if message.tool_calls else "__end__"


# Add a conditional edge to determine the next step after `museum_ai_agent`
builder.add_conditional_edges(
    "museum_ai_agent",
    # After museum_ai_agent finishes running, the next node(s) are scheduled
    # based on the output from route_agent_output
    route_agent_output,
)

# Add a normal edge from `tools` to `museum_ai_agent`
# This creates a cycle: after using tools, we always return to the agent
builder.add_edge("tools", "museum_ai_agent")

# Compile the builder into an executable graph
# You can customize this by adding interrupt points for state updates
graph = builder.compile(
    interrupt_before=[],  # Add node names here to update state before they're called
    interrupt_after=[],  # Add node names here to update state after they're called
)
graph.name = "ReAct Agent for DunHuang"  # This customizes the name in LangSmith
