from langchain import hub
from customize.get_ollama import GetOllama
from langgraph.prebuilt import create_react_agent
from customize.bocha_web_search import get_bocha_tool
from typing import Literal
from langgraph.graph import END
from typing import Union
import operator
from typing import Annotated, List, Tuple
from typing_extensions import TypedDict
from pydantic import BaseModel, Field
from langgraph.graph import StateGraph, START
from langchain_core.prompts import ChatPromptTemplate
from customize.save_image import save_graph
from langchain.prompts import ChatMessagePromptTemplate
import asyncio

# System prompt for the executor agent (Chinese). Gist: "You are an AI
# assistant; after executing the task, answer with absolutely concise,
# absolutely affirmative sentences; if a question has several answers, pick
# the one you think best; do not explain the question."
prompt_template = ChatPromptTemplate.from_messages([
    ("system", "你是一位人工智能助手，请你在执行任务后，使用绝对简洁、绝对肯定的语句回答问题；如果问题有多个答案，选择一个你认为最好的答案；不要解释问题。"),
    "{messages}"
])
# prompt = hub.pull("ih/ih-react-agent-executor")
prompt_template.pretty_print()  # echo the rendered prompt at import time (debug aid)
# Single web-search tool (Bocha) made available to the executor agent.
tools = [get_bocha_tool()]
# NOTE(review): GetOllama(...)() looks like a factory that is built and then
# called to return a chat model — confirm against customize.get_ollama.
llm = GetOllama( model_name="qwen2.5:14b", model_type=1)()
# ReAct-style tool-using agent; state_modifier injects the system prompt above.
agent_executor = create_react_agent(llm, tools, state_modifier=prompt_template)


class PlanExecute(TypedDict):
    """Shared graph state for the plan-and-execute workflow."""

    # Original user objective, set once at graph invocation.
    input: str
    # Remaining steps to execute; the head of the list runs next.
    plan: List[str]
    # (step, agent answer) pairs; operator.add makes node updates append
    # instead of overwrite.
    past_steps: Annotated[List[Tuple], operator.add]
    # Final answer; once non-empty, should_end routes the graph to END.
    response: str


class Plan(BaseModel):
    """Plan to follow in future."""

    # Ordered step descriptions; the LLM fills this via structured output.
    # The description text below is sent to the model — do not translate it.
    steps: List[str] = Field(
        description="different steps to follow, should be in sorted order"
    )


# Planner prompt (Chinese). Gist: "For the given objective, produce a simple
# step-by-step plan of individual tasks whose final step yields the answer;
# every step carries all the information it needs — skip nothing, add nothing
# superfluous; output in the format:  "steps":["plan1","plan2","plan3"]."
planner_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """对于给定的目标，提出一个简单的分步计划。
            此计划应涉及单个任务，如果正确执行将产生正确的答案。不要添加任何多余的步骤。
            最后一步的结果应该是最终答案。确保每个步骤都包含所需的所有信息 —— 不要跳过步骤。
            请按下面的格式输出计划：
                "steps":["计划1","计划2","计划3"]""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Deterministic (temperature=0) model instance used only for planning.
planner_llm = GetOllama(model_name="qwen2.5:14b", model_type=1, temperature=0)()
# Chain: prompt -> LLM constrained to emit a Plan object.
planner = planner_prompt | planner_llm.with_structured_output(Plan)

# result = planner.invoke(
#     {
#         "messages": [
#             ("user", "今年巴黎奥运会乒乓球男单冠军，他有没有结婚?")
#         ]
#     }
# )
#
#
# print(result)


class Response(BaseModel):
    """Response to user."""

    # Final answer text returned to the user; ends the plan/execute loop.
    response: str


class Act(BaseModel):
    """Action to perform.

    The replanner returns either a Response (final answer) or a Plan
    (remaining steps); replan_step branches on this union.
    """

    # Chinese description (sent to the LLM, do not translate). Gist: "Action
    # to perform. Use Response to reply to the user; use Plan if you still
    # need tools to get the answer."
    action: Union[Response, Plan] = Field(
        description="要执行的操作。如果要响应用户，请使用Response。如果你需要进一步使用工具来得到答案，使用Plan。"
    )


# Replanner prompt (Chinese). Gist: "Given the objective {input}, the
# original plan {plan}, and the completed steps {past_steps}, either return
# the final answer as  "action":{"response":"..."}  or an updated plan of
# ONLY the still-needed steps as  "action":{"steps":[...]}; do not repeat
# already-completed steps, and use no other output format."
replanner_prompt = ChatPromptTemplate.from_template(
"""
    对于给定的目标，提出一个简单的分步计划。\
    此计划应涉及单个任务，如果正确执行将产生正确的答案。不要添加任何多余的步骤。\
    最后一步的结果应该是最终答案。确保每个步骤都包含所需的所有信息——不要跳过步骤。
    
    你的目标是：
    {input}
    
    你最初的计划是这样的：
    {plan}
    
    您目前已完成以下步骤：
    {past_steps}
    相应地更新您的计划，只在计划中添加仍然需要完成的步骤。不要将以前完成的步骤作为计划的一部分返回。
    
    如果你已经得最终答案或结果，无需其它步骤，使用以下格式返回给用户:
        "action":{{"response":"你的回复"}}
    否则，填写计划，并按以下格式返回：
        "action":{{"steps":["步骤1","步骤2","步骤3"]}}
    不允许使用其它不符合以上格式的返回形式。
    
"""
)

# Deterministic (temperature=0) model instance used only for replanning.
replanner_llm = GetOllama( model_name="qwen2.5:14b", model_type=1, temperature=0)()
# Chain: prompt -> LLM constrained to emit an Act (Response | Plan).
replanner = replanner_prompt | replanner_llm.with_structured_output(Act)


async def execute_step(state: PlanExecute):
    """Execute the first remaining plan step with the ReAct executor agent.

    Returns a state delta: the (step, final agent message) pair is appended
    to past_steps via the operator.add reducer.
    """
    current_plan = state["plan"]
    numbered_plan = "\n".join(
        f"{idx}. {step}" for idx, step in enumerate(current_plan, start=1)
    )
    first_task = current_plan[0]
    task_prompt = (
        "For the following plan:\n"
        f"{numbered_plan}\n\nYou are tasked with executing step 1, {first_task}."
    )
    result = await agent_executor.ainvoke({"messages": [("user", task_prompt)]})
    # The agent's last message holds its final answer for this step.
    return {"past_steps": [(first_task, result["messages"][-1].content)]}


async def plan_step(state: PlanExecute):
    """Ask the planner for an initial step list from the user's objective."""
    user_message = ("user", state["input"])
    draft = await planner.ainvoke({"messages": [user_message]})
    return {"plan": draft.steps}


async def replan_step(state: PlanExecute):
    """Replan after a step: either finish with a final answer or update the plan."""
    decision = (await replanner.ainvoke(state)).action
    if isinstance(decision, Response):
        # The model chose to answer the user directly -> terminates the loop.
        return {"response": decision.response}
    # Otherwise the model produced an updated Plan of the remaining steps.
    return {"plan": decision.steps}


def should_end(state: PlanExecute):
    """Route to END once a final response exists, otherwise loop back to 'agent'."""
    has_answer = bool(state.get("response"))
    return END if has_answer else "agent"


# Build the plan-and-execute state graph:
#   START -> planner -> agent -> replan -> (agent | END)
workflow = StateGraph(PlanExecute)

# Add the plan node
workflow.add_node("planner", plan_step)

# Add the execution step
workflow.add_node("agent", execute_step)

# Add a replan node
workflow.add_node("replan", replan_step)

workflow.add_edge(START, "planner")

# From plan we go to agent
workflow.add_edge("planner", "agent")

# From agent, we replan
workflow.add_edge("agent", "replan")

workflow.add_conditional_edges(
    "replan",
    # Next, we pass in the function that will determine which node is called next.
    should_end,
    ["agent", END],
)

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
# save_graph(app, "langgraph_plan-and-execute.png")
# save_graph(app, "langgraph_plan-and-execute.png")


async def my_function():
    """Run the compiled graph on a sample question, printing each node update.

    Streams state deltas from the plan-and-execute graph; recursion_limit
    bounds the planner/agent/replan loop so a confused model cannot spin
    forever.
    """
    config = {"recursion_limit": 50}
    inputs = {"input": "今年美国总统当选人，他的太太叫什么名字?"}
    async for event in app.astream(inputs, config=config):
        for node_name, update in event.items():
            # Skip the synthetic terminal event; print every real node update.
            if node_name != "__end__":
                print(update)


if __name__ == "__main__":
    # Guard the demo so importing this module does not immediately run it.
    asyncio.run(my_function())