import os
from langchain_ollama import ChatOllama

# Runtime configuration pulled from the environment.
# `recursion_limit` must be an int when passed to graph.invoke(); os.getenv
# returns a string (or None), so convert here. 25 is LangGraph's documented
# default recursion limit and is used when the variable is unset/empty.
max_recursion_limit = int(os.getenv("MAX_RECURSION_LIMIT") or 25)
MODEL = os.getenv("MODEL")          # Ollama model name, e.g. "llama3"
BASE_URL = os.getenv("BASE_URL")    # Ollama server URL, e.g. "http://localhost:11434"
model = ChatOllama(model=MODEL, base_url=BASE_URL)

from typing import Literal
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI, OpenAI
from langchain_core.messages.tool import ToolCall
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
from langgraph.graph import MessagesState
from langgraph.managed.is_last_step import RemainingSteps
from langgraph.errors import GraphRecursionError
from uuid import uuid4

# Schema-only tool stub: the body is just the docstring, so invoking it
# returns None. Its signature and docstring define the schema the LLM sees;
# the graph surfaces the model's tool call as the final answer instead of
# actually executing the tool.
@tool
def get_weather(city: Literal["nyc", "sf"]):
    """Use this to get weather information."""


# Schema-only tool stub (returns None) — exists to give the LLM a structured
# "time" call to emit. NOTE: the docstring doubles as the tool description
# sent to the model, so it is left untouched here.
@tool
def get_time(
    locale: Literal["zh-CN", "ja-JP", "en-US"],
    offset: Literal[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
    positive: bool,
):
    """
    locale: location code.
    offset: timezone offset num
    positive:is positive offset
    """


# Expose both tool schemas to the model so it can emit tool calls; whether
# a call is ever executed is decided by the graph's routing, not here.
tools = [get_weather, get_time]
model_with_tools = model.bind_tools(tools)


class AgentState(MessagesState):
    # Final structured response from the agent: the raw ToolCall emitted by
    # the model (set in `respond`) or a synthetic one (`unsupport`).
    final_response: ToolCall
    # Managed value maintained by LangGraph: steps remaining before the
    # recursion limit; `should_continue` bails out when it runs low.
    remaining_steps: RemainingSteps


def call_model(state: AgentState):
    """Run the tool-bound chat model over the conversation so far.

    The resulting AI message (possibly carrying tool calls) is returned
    under the "messages" key so LangGraph merges it into the state.
    """
    ai_message = model_with_tools.invoke(state["messages"])
    return {"messages": [ai_message]}


def respond(state: AgentState):
    """Package the model's first tool call as the final structured answer.

    The tool is never executed: the ToolCall itself is the structured
    output. A tool message is still appended because chat providers
    require every AI tool call to be answered by a matching tool message.
    """
    tool_call = state["messages"][-1].tool_calls[0]
    return {
        "final_response": tool_call,
        "messages": [
            {
                "type": "tool",
                "content": "asaaas",
                "tool_call_id": tool_call["id"],
            }
        ],
    }


def unsupport(state: AgentState = None):
    """Fallback node: emit a synthetic "unsupport" tool call as the answer.

    Reached when the remaining step budget is nearly exhausted. The
    *state* argument is accepted (LangGraph passes it to every node) but
    is not read.
    """
    fallback_call = ToolCall(name="unsupport", args={}, id=str(uuid4()))
    return {
        "final_response": fallback_call,
        "messages": [
            {
                "type": "tool",
                "content": "asaaas",
                "tool_call_id": fallback_call["id"],
            }
        ],
    }

def error(e: GraphRecursionError):
    """Convert a GraphRecursionError into the final_response payload.

    Not a graph node: called from the top-level except handler around
    graph.invoke() so the script's output shape is the same whether or
    not the run hit the recursion limit.
    """
    # NOTE(review): ToolCall.args is typed as a dict but receives the
    # exception's args tuple here; kept for output compatibility — confirm
    # downstream consumers don't expect a mapping.
    response = ToolCall(name=e.__class__.__name__, args=e.args, id=str(uuid4()))
    tool_message = {
        "type": "tool",
        # e.args can be empty (e.g. `raise GraphRecursionError()`); fall back
        # to str(e) instead of raising IndexError inside the error handler.
        "content": e.args[0] if e.args else str(e),
        "tool_call_id": response["id"],
    }
    return {"final_response": response, "messages": [tool_message]}

def should_continue(state: AgentState):
    """Choose the next edge out of the `agent` node.

    Returns "unsupport" when the step budget is nearly exhausted,
    "respond" when the model's last message carries tool calls (the call
    is treated as the final structured answer), and "continue" (-> the
    tools node) otherwise.
    """
    # Bail out while 2 steps still remain so the fallback node can run
    # before the recursion limit is actually hit.
    if state["remaining_steps"] <= 2:
        return "unsupport"
    last_message = state["messages"][-1]
    return "respond" if last_message.tool_calls else "continue"


# Build the agent graph over AgentState.
workflow = StateGraph(AgentState)

# Nodes: the model caller, the two terminal responders, and a ToolNode.
workflow.add_node("agent", call_model)
workflow.add_node("respond", respond)
workflow.add_node("unsupport", unsupport)
workflow.add_node("tools", ToolNode(tools))

# Every run starts at the `agent` node.
workflow.set_entry_point("agent")

# NOTE(review): `should_continue` returns "respond" when the last AI message
# HAS tool calls, so "tools" is only reached when there are none — ToolNode
# would then have nothing to execute. On the happy path no tool ever runs;
# the tool call itself is surfaced as the final answer.
workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "continue": "tools",
        "respond": "respond",
        "unsupport": "unsupport",
    },
)

workflow.add_edge("tools", "agent")
workflow.add_edge("respond", END)
workflow.add_edge("unsupport", END)
graph = workflow.compile()
try:
    answer = graph.invoke(
        input={"messages": [("user", "what's the time on Japan ")]},
        # recursion_limit must be an int; max_recursion_limit is read from the
        # environment as a string, so convert before re-enabling this line.
        # config={"recursion_limit": max_recursion_limit},
    )

except GraphRecursionError as e:
    # Raised by invoke() when the step budget is exhausted; normalize it
    # into the same result shape as a successful run.
    answer = error(e)

print(answer["final_response"])
# Example output:
# {'name': 'get_time', 'args': {'locale': 'ja-JP', 'offset': 9, 'positive': True}, 'id': '2b6f7e40-a68a-4f88-8235-aebc929d9a41', 'type': 'tool_call'}