from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import interrupt
from langgraph.prebuilt import create_react_agent

from langchain_core.messages import SystemMessage, HumanMessage

from langchain_ollama import ChatOllama
# Local Ollama chat model used by the agent; reasoning=False suppresses the
# model's chain-of-thought output so only final answers are returned.
llm = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False)

from typing import Callable
from langchain_core.tools import BaseTool, tool as create_tool
from langchain_core.runnables import RunnableConfig
from langgraph.types import interrupt
from langgraph.prebuilt.interrupt import HumanInterruptConfig, HumanInterrupt

# def add_human_in_the_loop(
#     tool: Callable | BaseTool,
#     *,
#     interrupt_config: HumanInterruptConfig = None,
# ) -> BaseTool:
#     """Wrap a tool to support human-in-the-loop review."""
#     # print("isinstance(tool, BaseTool):", isinstance(tool, BaseTool))
#     if not isinstance(tool, BaseTool):
#         tool = create_tool(tool)
#         # print("create tool done")

#     if interrupt_config is None:
#         interrupt_config = {
#             "allow_accept": True,
#             "allow_edit": True,
#             "allow_respond": True,
#         }

#     @create_tool(  
#         tool.name,
#         description=tool.description,
#         args_schema=tool.args_schema
#     )
#     def call_tool_with_interrupt(config: RunnableConfig, **tool_input):
#         request: HumanInterrupt = {
#             "action_request": {
#                 "action": tool.name,
#                 "args": tool_input
#             },
#             "config": interrupt_config,
#             "description": "Please review the tool call"
#         }
#         response = interrupt([request])[0]
#         # approve the tool call
#         if response["type"] == "accept":
#             tool_response = tool.invoke(tool_input, config)
#         # update tool call args
#         elif response["type"] == "edit":
#             tool_input = response["args"]["args"]
#             tool_response = tool.invoke(tool_input, config)
#         # respond to the LLM with user feedback
#         elif response["type"] == "response":
#             user_feedback = response["args"]
#             tool_response = user_feedback
#         else:
#             raise ValueError(f"Unsupported interrupt response type: {response['type']}")

#         return tool_response

#     return call_tool_with_interrupt

# An example of a sensitive tool that requires human review / approval
def book_hotel(hotel_name: str) -> str:
    """Book a stay at the given hotel (example of a sensitive tool).

    Args:
        hotel_name: Name of the hotel to book.

    Returns:
        A human-readable confirmation message.
    """
    # Restore the real confirmation message; the previous string
    # ("the hotel name has been changed and booked ...") was a leftover
    # debug message from testing the interrupt edit-resume flow.
    return f"Successfully booked a stay at {hotel_name}."

# checkpointer = InMemorySaver() 

# ReAct-style agent wired to the local Ollama model with the raw book_hotel
# tool. NOTE(review): the human-in-the-loop wrapper and the checkpointer are
# currently commented out, so tool calls execute immediately without human
# approval and no conversation state is persisted between invocations.
agent = create_react_agent(
    model=llm,
    # tools=[add_human_in_the_loop(book_hotel)],
    tools=[book_hotel],
    # checkpointer=checkpointer, 
)

# config = {
#    "configurable": {
#       "thread_id": "1"
#    }
# }

# for chunk in agent.stream(
#     {
#         "messages": 
#             [
#                 # {"role": "user", "content": "book a stay at McKittrick hotel"}
#                 # HumanMessage(content="book a stay at McKittrick hotel")
#                 HumanMessage(content="book a stay at any hotel")
#             ]
#     },
#     config
# ):
#     print("===================== Request Chunk =====================")
#     print(chunk)
#     if chunk.get("__interrupt__"):  # Check if the chunk contains an interrupt
#         break
#     print("\n")

# print("\n")

# from langgraph.types import Command

# for chunk in agent.stream(
#     # Command(resume=[{"type": "accept"}]),
#     Command(resume=[{"type": "edit", "args": {"args": {"hotel_name": "hhwang Hotel"}}}]),
#     config
# ):
#     print("===================== Resume Chunk =====================")
#     print(chunk)
#     print("\n")
