from langchain_core.tools import tool
from langchain_core.messages import SystemMessage, AIMessage
import random
from typing import Annotated, Literal
from typing_extensions import TypedDict
from langgraph.graph.message import add_messages
from customize.get_ollama import GetOllama
from langgraph.graph import END, START, StateGraph
from langgraph.prebuilt import ToolNode
from customize.save_image import save_graph

# Module-level flag modeling the simulated light's on/off state.
# Written by the light_control tool and read by the light_detection tool.
light_state = False


@tool
def people_detection() -> str:
    """
    检测环境中是否有人，并返回检测结果。
    :return: 当检测到人是返回"环境中检测到人"，没有检测到返回"环境中没有人"
    """
    # NOTE: the docstring above doubles as the tool description that @tool
    # sends to the LLM, so it is intentionally kept in Chinese to match the
    # Chinese system prompt.
    # Simulate a presence sensor with a random boolean reading.
    # (Removed a commented-out `value = False` debug override.)
    value = random.choice([True, False])
    print(f"环境检测结果{value}")
    return "环境中检测到人" if value else "环境中没有人"


@tool
def light_control(control_instruction: bool) -> str:
    """
    通过输入灯光控制命令，控制灯的开和关。
    :param control_instruction:True表示开灯，False表示关灯。
    :return: 灯开则返回"已经开灯"，灯关闭返回"已经关灯"
    """
    # A `global` declaration applies to the whole function body no matter
    # where it appears; it was previously declared inside the `if` branch
    # only, which misleadingly suggested the `else` branch wrote a local.
    # Hoisted to the top so both assignments are obviously to the module flag.
    global light_state

    if control_instruction:
        print("Turn on the light!")
        light_state = True
        print("开灯动作")
        return "已经开灯"
    else:
        print("Turn off the light!")
        light_state = False
        print("关灯动作")
        return "已经关灯"


@tool
def light_detection() -> str:
    """
    检测当前灯光的状态
    :return: 开返回"灯是开的状态"，关返回"灯是关的状态"
    """
    # Report the module-level flag maintained by the light_control tool.
    current = light_state
    print(f"灯光检测结果：{current}")
    return "灯是开的状态" if current else "灯是关的状态"



class State(TypedDict):
    # Graph state: a single "messages" channel of type list.  The
    # `add_messages` reducer in the annotation tells LangGraph how this key
    # is updated: new messages are APPENDED to the list instead of
    # overwriting it.
    messages: Annotated[list, add_messages]


# llm = GetOllama(model_name="llama3.1", model_type=1)()
# Local Ollama chat model; temperature=0 for deterministic tool selection.
llm = GetOllama(model_name="qwen2.5:14b", model_type=1, temperature=0)()
tools = [people_detection, light_control, light_detection]
# Bind the three tools so the model can emit structured tool calls.
llm = llm.bind_tools(tools)


def ctrl_agent(state: State):
    """Agent node: ensure the system prompt is first, invoke the LLM, and
    return its response as a state update (appended via ``add_messages``).

    :param state: current graph state; ``state['messages']`` is the
        conversation so far.
    :return: partial state update ``{"messages": [response]}``.
    """
    # Prompt (kept in Chinese: it is the runtime instruction sent to the LLM).
    system = """你作为一个管理助手，接收用户输入或工具的输入，并依据以下要求做出反应：
    当收到用户输入的文本时，首先剖析该文本的任务意图，任务可能涵盖检测环境中是否有人、检测环境的开关灯状态以及控制灯光开关这三种。
    请依照以下逻辑调用工具：
    若需检测环境是否开灯，调用工具 [light_detection] 进行检测。
    若需开关灯，调用工具 [light_control] 进行操作。
    若需检测环境中是否有人，需先调用工具 [light_detection] 检测环境是否开灯，再依据检测结果选择以下一项进行操作：
    1）处于开灯状态时，使用工具 [people_detection] 检测环境中是否有人。
    2）处于关灯状态时，先调用工具 [light_control] 开灯，再使用工具 [people_detection] 检测环境中是否有人。
    你必须规划好执行的步骤，根据规划一步一步调用工具执行任务，并把每一步执行的结果记录，
    将每一步的操作都反馈给用户，并把最终的环境状况和开关灯结果告知用户。
    """
    print("--call agent--")
    messages = state['messages']
    # Prepend the system prompt WITHOUT mutating the state's list in place
    # (the original used `messages.insert(0, ...)`; LangGraph state should
    # only be changed through returned updates).  The `not messages` guard
    # also avoids an IndexError on an empty message list.
    if not messages or not isinstance(messages[0], SystemMessage):
        messages = [SystemMessage(content=system)] + list(messages)
    response = llm.invoke(messages)
    print(response)
    return {"messages": [response]}


def should_continue(state: State) -> Literal["people_detection", "light_control", "light_detection", END]:
    """Conditional router for the agent node.

    Returns the name of the tool node requested by the model's first tool
    call, or END when the model produced a plain (no-tool) answer.
    """
    print("should_continue")
    last_message = state['messages'][-1]
    # Guard clause: no tool call means the agent is done.
    if not (isinstance(last_message, AIMessage) and last_message.tool_calls):
        return END
    first_call = last_message.tool_calls[0]
    print(first_call['name'])
    return first_call['name']


# Build the control graph: the agent plans, each tool runs in its own node,
# and every tool routes back to the agent for the next planning step.
workflow = StateGraph(State)
workflow.add_node("agent", ctrl_agent)
workflow.add_node("light_detection", ToolNode(tools=[light_detection]))
workflow.add_node("light_control", ToolNode(tools=[light_control]))
workflow.add_node("people_detection", ToolNode(tools=[people_detection]))

workflow.add_edge(START, "agent")
# `should_continue` returns the name of the requested tool node, or END.
workflow.add_conditional_edges("agent", should_continue)
workflow.add_edge("light_control", "agent")
workflow.add_edge("people_detection", "agent")
# Fixed: this edge was accidentally added twice in the original.
workflow.add_edge("light_detection", "agent")
app = workflow.compile()

# save_graph(app, "ctrl_light.png")

inputs = {
    "messages": [
        # ("user", "What does Lilian Weng say about the types of agent memory?"),
        ("user", "检测环境中是否有人。如果没有人，则关灯，如果有人，则不要关灯。")
    ]
}
config = {"configurable": {"thread_id": "1"}}
# Fixed two issues:
# 1) `config` was built but never passed to `stream` — now forwarded.
# 2) The default stream_mode ("updates") yields {node_name: update} dicts,
#    so `"messages" in event` never matched and nothing was printed.
#    stream_mode="values" yields the full state after each step, which
#    does contain the "messages" key.
events = app.stream(inputs, config, stream_mode="values")
for event in events:
    if "messages" in event:
        msg = event["messages"][-1]
        msg.pretty_print()
        if msg.content:
            print(msg.content)