import operator
from langgraph.graph import StateGraph, START, END
from pydantic import BaseModel, Field
from typing import Annotated, List, Tuple, Union
from typing_extensions import TypedDict
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage, AIMessage
import re
# from langgraph.prebuilt import create_react_agent
from langchain_ollama import ChatOllama

# llm = ChatOllama(model="qwen3:8b", temperature=0.6)

# Global debug counter for plan/execute/replan cycles. It is incremented only
# in should_end() (once per replan loop) and read by the node functions for
# their banner printouts.
seq = 1

class Plan(BaseModel):
    """Plan to follow in future"""
    # Ordered list of task descriptions produced by the planner LLM.
    # NOTE(review): the class docstring and the Field description are emitted
    # into the structured-output JSON schema sent to the model, so their exact
    # wording is runtime behavior — do not edit them casually.
    steps: List[str] = Field(
        description="different steps to follow, should be in sorted order"
    )

class PlanExecute(TypedDict):
    """Graph state shared by the planner, agent, and replan nodes."""
    input: str        # the user's original objective (set once in `inputs`)
    plan: List[str]   # remaining steps; plan[0] is executed next by execute_step
    # (task, result) tuples; operator.add makes LangGraph append each node's
    # update instead of overwriting the accumulated history.
    past_steps: Annotated[List[Tuple], operator.add]
    response: str     # final answer; a non-empty value makes should_end return END

def plan_step(state: PlanExecute):
    """Planner node: turn the user's objective into an ordered step list.

    Builds a one-shot chain (prompt | structured-output LLM) and invokes it
    with the objective from ``state["input"]``.

    Returns:
        dict: ``{"plan": [...]}`` — the state update LangGraph merges in.
    """
    # `seq` is only *read* here for the debug banner, so the original
    # `global seq` declaration was unnecessary and has been removed.
    print(f"============== plan_step ({seq}) ================")
    planner_prompt = ChatPromptTemplate.from_messages(
        [
            SystemMessage(content="For the given objective, come up with a simple step by step plan. This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps. Must use Chinese as output."),
            ("placeholder", "{messages}"),
        ]
    )
    # with_structured_output(Plan) forces the model reply into the Plan schema.
    planner = planner_prompt | ChatOllama(model="qwen3:8b", temperature=0.6).with_structured_output(Plan)
    plan = planner.invoke({"messages": [HumanMessage(content=state["input"])]})
    return {"plan": plan.steps}

# System prompt intended for the ReAct agent below. NOTE(review): both
# create_react_agent lines are commented out, so `prompt` is currently unused —
# execute_task() stubs the agent instead. Kept for when the real agent is
# re-enabled.
prompt = "You are a helpful assistant. Just give the answer without any explanation. Do not repeat the question. Do not say 'The answer is' or similar phrases. Just provide the answer directly."
# agent_executor = create_react_agent(llm, tools, prompt=prompt)
# agent_executor = create_react_agent(llm, prompt=prompt)

def execute_task(summary, task):
    """Stubbed agent executor: match the task text against canned rules and
    return a hard-coded Chinese answer.

    Mimics the message shape of a real ``agent_executor.invoke()`` result so
    that execute_step can keep reading ``messages[-1].content``.
    """
    print(f"============== execute_task ({seq}) ================")
    print("summary: ",summary)
    print("task: ",task)
    # Ordered (regex, canned answer) rules; the first pattern found in the
    # task text wins. The fallback answer is used when nothing matches.
    rules = (
        (r'(公开赛)', "男子单打冠军的姓名是德约科维奇。"),
        (r'(查找)', "德约科维奇出生在塞尔维亚贝尔格莱德。"),
        (r'(确认|是否)', "德约科维奇出生在塞尔维亚贝尔格莱德是正确的。"),
    )
    output = "所有信息已收集，所有任务已完成，所有输出都是中文。"
    for pattern, answer in rules:
        if re.search(pattern, task):
            output = answer
            break
    return {
        "messages": [
            HumanMessage(content=summary),
            AIMessage(content=f"{output}"),
        ]
    }

def execute_step(state: PlanExecute):
    """Agent node: run the first remaining plan step via the stubbed executor
    and record the (task, result) pair into past_steps."""
    print(f"============== execute_task_step ({seq}) ================")
    remaining = state["plan"]
    plan_str = "\n".join(f"{num}. {step}" for num, step in enumerate(remaining, start=1))
    print(plan_str)
    task = remaining[0]
    task_formatted = f"""For the following plan:\n{plan_str}\nYou are tasked with executing step {1}, {task}."""
    # agent_response = agent_executor.invoke({"messages": [HumanMessage(content=task_formatted)]})
    agent_response = execute_task(task_formatted, task)
    return {
        "past_steps": [(task, agent_response["messages"][-1].content)],
    }

class Response(BaseModel):
    """Response to user."""
    # Final answer text. NOTE(review): the docstring is part of the structured
    # output schema shown to the LLM — wording is runtime behavior.
    response: str

class Act(BaseModel):
    """Action to perform."""

    # Union discriminates the replanner's decision: a Response ends the run
    # (replan_step sets state["response"]), a Plan continues the loop.
    # The Field description (including the deliberately commented-out second
    # sentence) is sent to the LLM as schema documentation.
    action: Union[Response, Plan] = Field(
        description="Action to perform. If you want to respond to user, use Response. "
        # "If you need to further use tools to get the answer, use Plan."
    )

# Replanner prompt: given the objective, the original plan, and the steps
# already done, the model must either return a Response (answer the user) or
# a trimmed Plan of only the remaining steps.
replanner_prompt = ChatPromptTemplate.from_template(
    """For the given objective, come up with a simple step by step plan. This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps. 

Your objective was this:
{input}

Your original plan was this:
{plan}

You have currently done the follow steps:
{past_steps}

Update your plan accordingly. If no more steps are needed and you can return to the user, then respond with that. Otherwise, fill out the plan. Only add steps to the plan that still NEED to be done. Do not return previously done steps as part of the plan. Must use Chinese as output."""
)

# temperature=0 (vs 0.6 for the initial planner) keeps replanning decisions
# as deterministic as possible; output is constrained to the Act schema.
replanner = replanner_prompt | ChatOllama(model="qwen3:8b", temperature=0).with_structured_output(Act)

def replan_step(state: PlanExecute):
    """Replan node: ask the LLM whether to answer the user or keep going.

    Returns ``{"response": ...}`` when the model chose a Response (which makes
    should_end finish the graph), otherwise the updated ``{"plan": ...}``.
    """
    print(f"============== replan_step ({seq}) ================")
    print(state)
    decision = replanner.invoke(state).action
    if isinstance(decision, Response):
        return {"response": decision.response}
    return {"plan": decision.steps}

def should_end(state: PlanExecute):
    """Conditional edge after the replan node: loop back to the agent or stop.

    Also advances the global ``seq`` debug counter once per replan cycle
    (here `global` IS required, since seq is assigned).

    Returns:
        END when the plan is missing/empty or a final response exists;
        "agent" otherwise.
    """
    global seq
    seq += 1

    # .get() also covers the case where "plan" was never written at all
    # (e.g. replan_step returned only a response), which previously raised
    # KeyError; it subsumes the None and empty-list checks too.
    if not state.get("plan"):
        return END

    # state.get("response") is the concise form of
    # `"response" in state and state["response"]`.
    if state.get("response"):
        return END

    return "agent"

# --- Graph wiring ---------------------------------------------------------
# Topology: START -> planner -> agent -> replan -> (agent | END), i.e. plan
# once, then alternate execute/replan until should_end returns END.
workflow = StateGraph(PlanExecute)

workflow.add_node("planner", plan_step)
workflow.add_node("agent", execute_step)
workflow.add_node("replan", replan_step)

workflow.add_edge(START, "planner")
workflow.add_edge("planner", "agent")
workflow.add_edge("agent", "replan")
workflow.add_conditional_edges(
    "replan",
    # Next, we pass in the function that will determine which node is called next.
    should_end,
    ["agent", END],
)

app = workflow.compile()

# recursion_limit bounds the agent/replan loop so a bad replanner can't spin forever.
config = {"recursion_limit": 50}
inputs = {"input": "what is the hometown of the mens 2024 Australia open winner?"}

print("--- begin graph ---")
# Each streamed event maps node name -> the state delta that node returned;
# only the delta values are printed (the unused key binding was dropped).
# NOTE(review): this runs at import time; consider an `if __name__ == "__main__":`
# guard if the module is ever imported elsewhere.
for event in app.stream(inputs, config=config):
    for step_output in event.values():
        print(step_output)
