from typing import Literal, TypedDict
import uuid

from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langgraph.types import interrupt, Command
from langgraph.checkpoint.memory import InMemorySaver
from langchain_core.messages import SystemMessage, HumanMessage

from langchain_ollama import ChatOllama
# Locally served Ollama model; reasoning=False suppresses the model's
# "thinking" trace so only the final answer is returned.
llm = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False)

# Define the shared graph state
class State(TypedDict):
    """Shared graph state merged across LangGraph nodes."""
    user_input: str  # raw text typed by the user
    llm_output: str  # assistant reply produced by generate_llm_output
    decision: str    # "approved" or "rejected", set by human_approval

# Simulate an LLM output node
def generate_llm_output(state: State) -> State:
    """Call the LLM with the user's input and capture its reply.

    Returns a partial state update containing only ``llm_output``;
    LangGraph merges it into the shared ``State``.
    """
    response = llm.invoke(
        [
            SystemMessage(content="You are a helpful assistant."),
            HumanMessage(content=state["user_input"]),
        ]
    )
    return {"llm_output": response.content}

# Human approval node
def human_approval(state: State) -> Command[Literal["approved_path", "rejected_path"]]:
    """Pause the graph for human review and route on the reviewer's answer.

    ``interrupt()`` suspends execution here; the value supplied on resume
    (via ``Command(resume=...)``) becomes ``decision``.
    """
    decision = interrupt({
        "question": "Do you approve the following joke? [approve/reject]",
        "llm_output": state["llm_output"]
    })

    approved = decision == "approve"
    return Command(
        goto="approved_path" if approved else "rejected_path",
        update={"decision": "approved" if approved else "rejected"},
    )

# Next steps after approval
def approved_node(state: State) -> State:
    """Terminal node for the approval branch; logs and passes state through."""
    message = "✅ Approved path taken by user."
    print(message)
    return state

# Alternative path after rejection
def rejected_node(state: State) -> State:
    """Terminal node for the rejection branch; logs and passes state through."""
    message = "❌ Rejected path taken by user."
    print(message)
    return state

# Build the graph
builder = StateGraph(State)
builder.add_node("generate_llm_output", generate_llm_output)
builder.add_node("human_approval", human_approval)
builder.add_node("approved_path", approved_node)
builder.add_node("rejected_path", rejected_node)

# Flow: START -> generate_llm_output -> human_approval -> (approved | rejected) -> END.
# human_approval routes itself via Command(goto=...), so it needs no outgoing edges.
builder.add_edge(START, "generate_llm_output")
builder.add_edge("generate_llm_output", "human_approval")
builder.add_edge("approved_path", END)
builder.add_edge("rejected_path", END)

# A checkpointer is required for interrupt()/resume to persist state between invocations.
checkpointer = InMemorySaver()
graph = builder.compile(checkpointer=checkpointer)

# Run until interrupt
# One conversation thread for the whole session: the checkpointer keys saved
# state (including any pending interrupt) by this thread_id. Use a string so
# the id stays serializable in checkpoint metadata.
config = {"configurable": {"thread_id": str(uuid.uuid4())}}

def stream_graph_updates(user_input: str):
    """Start a new run with the user's input and print the interrupt payload.

    The graph pauses at ``human_approval``, so the invoke result normally
    carries an ``__interrupt__`` entry whose value holds the review question
    and the generated output.
    """
    print("\nSystem:\n")
    result = graph.invoke({"user_input": user_input}, config=config)
    interrupts = result.get("__interrupt__")
    if not interrupts:
        # Defensive: if the graph ran to completion without pausing, the
        # original code raised KeyError/IndexError here; show the result instead.
        print(result)
        return
    review = interrupts[0].value
    print(review["question"])
    print(review["llm_output"])

def resume_graph(resume: str):
    """Resume the paused graph with the reviewer's decision and print the final state."""
    outcome = graph.invoke(Command(resume=resume), config=config)
    print(outcome)

# Interactive loop: "approve"/"reject" resumes a paused run, anything else
# starts a new one, and quit/exit/q (or Ctrl-C / Ctrl-D) ends the session.
while True:
    try:
        user_input = input("User: ")
    except (KeyboardInterrupt, EOFError):
        # Only terminal-exit signals end the loop; the original bare
        # `except: break` silently swallowed every error, hiding real bugs.
        print("\nGoodbye!")
        break
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    if user_input.lower() in ["approve", "reject"]:
        resume_graph(user_input)
    else:
        stream_graph_updates(user_input)
