from langchain import hub
from customize.get_ollama import GetOllama
from langgraph.prebuilt import create_react_agent
from customize.bocha_web_search import get_bocha_tool
from typing import Literal
from langgraph.graph import END
from typing import Union
import operator
from typing import Annotated, List, Tuple
from typing_extensions import TypedDict
from pydantic import BaseModel, Field
from langgraph.graph import StateGraph, START
from langchain_core.prompts import ChatPromptTemplate
from customize.save_image import save_graph
from langchain.prompts import ChatMessagePromptTemplate
import asyncio
from langchain_core.tools import tool
import random


# Module-level lamp state shared by light_control (writes) and light_detection (reads).
light_state = False


@tool
def people_detection() -> str:
    """
    检测环境中是否有人，并返回检测结果。
    :return: 检测到人时返回"环境中有人。"，未检测到时返回"环境中没有人。"
    """
    # The docstring doubles as the tool description sent to the LLM at runtime, so it
    # stays in Chinese. The previous version wrongly claimed a True/False return; the
    # tool actually returns a Chinese sentence (the -> str annotation was correct).
    # Simulated sensor: a real hardware detector would replace random.choice here.
    value = random.choice([True, False])
    print(f"环境检测结果{value}")
    if value:
        return "环境中有人。"
    else:
        return "环境中没有人。"


@tool
def light_control(control_instruction: bool) -> bool:
    """
    通过输入灯光控制命令，控制灯的开和关。
    :param control_instruction:True表示开灯，False表示关灯。
    :return: 灯开则返回True，灯关闭返回False
    """
    # Docstring kept byte-identical and in Chinese: @tool sends it to the LLM as the
    # runtime tool description.
    # Declare the global up front. The original declared it inside the `if` branch —
    # which still works, because `global` is a function-wide compile-time declaration —
    # but it read as if the `else` branch were writing a local variable.
    global light_state
    if control_instruction:
        print("Turn on the light!")
        light_state = True
        print("开灯动作")
        return True
    else:
        print("Turn off the light!")
        light_state = False
        print("关灯动作")
        return False


@tool
def light_detection() -> bool:
    """
    检测当前灯光的状态
    :return: 开返回True，关返回False
    """
    # NOTE: the Chinese docstring above is the tool description @tool sends to the LLM
    # at runtime, so it is deliberately left untranslated.
    # Reads the module-level `light_state` flag maintained by light_control.
    print(f"灯光检测结果：{light_state}")
    return light_state


# Agent system prompt (Chinese, runtime text — left untranslated): instructs the model
# how to sequence the three tools; rule 3 makes people detection valid only while the
# light is on, forcing a light_detection (and possibly light_control) call first.
prompt_template = ChatPromptTemplate.from_messages([
    ("system", """你作为一个管理助手，接收用户输入，调用工具，并依据以下要求做出反应：
当收到用户输入的文本时，首先剖析该文本的任务意图，任务可能涵盖检测环境中是否有人、检测环境的开关灯状态以及控制灯光开关这三种。
请依照以下逻辑调用工具：
1. 若需检测环境是否开灯，调用工具 [light_detection] 进行检测。
2. 若需开灯/关灯，调用工具 [light_control] 进行操作。
3. 只有在环境开灯状态下，才能检测环境中是否有人，因此需先调用工具 [light_detection] 检测环境是否开灯，再依据检测结果选择以下一项进行操作：
   1）处于开灯状态时，使用工具 [people_detection] 检测环境中是否有人。
   2）处于关灯状态时，先调用工具 [light_control] 开灯，再使用工具 [people_detection] 检测环境中是否有人。
4. 其它要求，组合1~3的操作，规划好执行的步骤，根据规划一步一步调用工具执行任务。
将最终的操作或检测结果用最简洁的语句反馈给用户。
    """),
    "{messages}"
])
# prompt = hub.pull("ih/ih-react-agent-executor")
prompt_template.pretty_print()  # echo the assembled prompt for debugging
tools = [people_detection, light_control, light_detection]
# Local Ollama-served qwen2.5:14b; temperature 0 for deterministic tool calls.
llm = GetOllama(ip=GetOllama.ailab_linux_ip, model_name="qwen2.5:14b", model_type=1, temperature=0)()
# NOTE(review): `state_modifier` was renamed to `prompt` in newer langgraph releases —
# confirm the pinned langgraph version still accepts this keyword.
agent_executor = create_react_agent(llm, tools, state_modifier=prompt_template)


class PlanExecute(TypedDict):
    """Shared state flowing through the plan-and-execute graph."""
    # Original user request.
    input: str
    # Remaining steps; execute_step always runs the first one.
    plan: List[str]
    # Accumulated (task, agent answer) pairs; operator.add appends updates from nodes
    # instead of overwriting the list.
    past_steps: Annotated[List[Tuple], operator.add]
    # Final answer; once set, should_end routes the graph to END.
    response: str


class Plan(BaseModel):
    """Plan to follow in future"""

    # NOTE: the class docstring and Field description are emitted into the
    # structured-output schema shown to the planner LLM — they are behavior-relevant.
    steps: List[str] = Field(
        description="different steps to follow, should be in sorted order"
    )


# Planner prompt (Chinese, runtime text): asks for a minimal ordered step list and
# pins the exact output shape expected by with_structured_output(Plan).
planner_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """对于给定的目标，提出一个简单的分步计划。
            此计划应涉及单个任务，如果正确执行将产生正确的答案。不要添加任何多余的步骤。
            最后一步的结果应该是最终答案。确保每个步骤都包含所需的所有信息 —— 不要跳过步骤。
            请按下面的格式输出计划：
                "steps":["计划1","计划2","计划3"]""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Dedicated LLM instance; structured output parses replies directly into Plan.
planner_llm = GetOllama(ip=GetOllama.ailab_linux_ip,model_name="qwen2.5:14b", model_type=1, temperature=0)()
planner = planner_prompt | planner_llm.with_structured_output(Plan)

# result = planner.invoke(
#     {
#         "messages": [
#             ("user", "今年巴黎奥运会乒乓球男单冠军，他有没有结婚?")
#         ]
#     }
# )
#
#
# print(result)


class Response(BaseModel):
    """Response to user."""

    # Final user-facing answer; this model is one arm of Act's Response | Plan union
    # and its docstring feeds the structured-output schema.
    response: str


class Act(BaseModel):
    """Action to perform."""

    # The replanner returns either a final Response or a revised Plan. The (Chinese)
    # description below is sent to the LLM inside the structured-output schema, so it
    # is runtime text and left untranslated.
    action: Union[Response, Plan] = Field(
        description="要执行的操作。如果要响应用户，请使用Response。如果你需要进一步使用工具来得到答案，使用Plan。"
    )


# Replanner prompt (Chinese, runtime text): given the goal, the original plan, and the
# steps already done, the model either trims the plan to what remains or emits the
# final answer. The two "action" formats at the bottom mirror the Act union schema.
replanner_prompt = ChatPromptTemplate.from_template(
"""
    对于给定的目标，提出一个简单的分步计划。\
    此计划应涉及单个任务，如果正确执行将产生正确的答案。不要添加任何多余的步骤。\
    最后一步的结果应该是最终答案。确保每个步骤都包含所需的所有信息——不要跳过步骤。
    
    你的目标是：
    {input}
    
    你最初的计划是这样的：
    {plan}
    
    您目前已完成以下步骤：
    {past_steps}
    相应地更新您的计划，只在计划中添加仍然需要完成的步骤。不要将以前完成的步骤作为计划的一部分返回。
    
    如果不需要更多步骤，并且您可以返回给用户，使用以下格式返回给用户:
        "action":{{"response":"你的回复"}}
    否则，填写计划，并按以下格式返回：
        "action":{{"steps":["步骤1","步骤2","步骤3"]}}
    不允许使用其它不符合以上格式的返回形式。
    
"""
)

# Dedicated LLM instance; structured output parses replies into Act (Response | Plan).
replanner_llm = GetOllama(ip=GetOllama.ailab_linux_ip, model_name="qwen2.5:14b", model_type=1, temperature=0)()
replanner = replanner_prompt | replanner_llm.with_structured_output(Act)


async def execute_step(state: PlanExecute):
    """Execute the first remaining plan step with the ReAct agent.

    :param state: current graph state; ``plan`` must hold at least one step.
    :return: partial state update appending ``(task, agent answer)`` to ``past_steps``.
    :raises ValueError: if the plan is empty (previously an opaque IndexError).
    """
    plan = state["plan"]
    if not plan:
        # A well-formed (re)plan always leaves at least one step; fail loudly otherwise.
        raise ValueError("execute_step called with an empty plan")
    plan_str = "\n".join(f"{i + 1}. {step}" for i, step in enumerate(plan))
    task = plan[0]
    # Always execute step 1: the replanner removes completed steps each cycle.
    # (The original interpolated the literal `{1}` through the f-string — same output.)
    task_formatted = f"""For the following plan:
{plan_str}\n\nYou are tasked with executing step 1, {task}."""
    agent_response = await agent_executor.ainvoke(
        {"messages": [("user", task_formatted)]}
    )
    return {
        "past_steps": [(task, agent_response["messages"][-1].content)],
    }


async def plan_step(state: PlanExecute):
    """Produce the initial step list for the user's request via the planner chain."""
    planned = await planner.ainvoke({"messages": [("user", state["input"])]})
    return {"plan": planned.steps}


async def replan_step(state: PlanExecute):
    """Revise the remaining plan, or emit the final user-facing response."""
    decision = await replanner.ainvoke(state)
    action = decision.action
    if isinstance(action, Response):
        return {"response": action.response}
    return {"plan": action.steps}


def should_end(state: PlanExecute):
    """Route to END once a non-empty response exists; otherwise loop back to the agent."""
    return END if state.get("response") else "agent"


# Wire up the plan -> execute -> replan loop over the shared PlanExecute state.
workflow = StateGraph(PlanExecute)

# Node that produces the initial step list.
workflow.add_node("planner", plan_step)

# Node that executes the first pending step with the ReAct agent.
workflow.add_node("agent", execute_step)

# Node that trims the plan or emits the final response.
workflow.add_node("replan", replan_step)

workflow.add_edge(START, "planner")

# From plan we go to agent
workflow.add_edge("planner", "agent")

# From agent, we replan
workflow.add_edge("agent", "replan")

workflow.add_conditional_edges(
    "replan",
    # should_end routes to END once a response is set, otherwise back to the agent.
    should_end,
    ["agent", END],
)

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
# save_graph(app, "langgraph_plan-and-execute.png")


async def my_function():
    """Stream the graph over a sample request, printing each node's state update."""
    run_config = {"recursion_limit": 50}
    initial_state = {"input": " 现在帮我看看有没有人，如果没人就帮我关灯。"}
    async for update in app.astream(initial_state, config=run_config):
        for node_name, node_output in update.items():
            if node_name != "__end__":
                print(node_output)
asyncio.run(my_function())