# https://docs.langchain.com/oss/python/langgraph/quickstart
# NOTE: improvements over the tutorial above
# 0. The tutorial overall feels dated compared with LangGraph 1.0.0
# 1. Introduce llms.py so the online Qwen model can be used
# 2. State does not use operator.add; it uses langgraph.graph.message.add_messages instead

import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.llms import initialize_llm
from utils.kit import save_graph_visualization

from langchain.tools import tool
from langchain.messages import AnyMessage
from typing_extensions import TypedDict, Annotated
# import operator
from langgraph.graph.message import add_messages
from langchain.messages import SystemMessage
from langchain.messages import ToolMessage

from typing import Literal
from langgraph.graph import StateGraph, START, END
from langchain.messages import HumanMessage

import logging

# Configure the logging template: timestamp - logger name - level - message
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


model_with_tools = None

# Define tools
# The @tool decorator requires a docstring: it becomes the tool description
# (and per-argument description) sent to the LLM, so the Args names must
# match the actual parameter names.
@tool
def multify(x: int, y: int) -> int:
    """Multiply `x` and `y`.

    Args:
        x: First int
        y: Second int
    """
    return x * y

@tool
def add(x: int, y: int) -> int:
    """Add `x` and `y`.

    Args:
        x: First int
        y: Second int
    """
    return x + y

@tool
def divide(x: int, y: int) -> float:
    """Divide `x` by `y`.

    Args:
        x: First int
        y: Second int

    Raises:
        ZeroDivisionError: if `y` is 0 (same type the bare division would
            raise, but with an explicit message so tool failures are clear).
    """
    if y == 0:
        raise ZeroDivisionError("divide: divisor `y` must not be 0")
    return x / y


# 0. Model: initialize the chat model via the shared helper
#    (per the header note, this is the online Qwen model).
model = initialize_llm()


# 1. Tools: collect the tool functions, keep a name->tool map for dispatch
#    in tool_node, and bind the tools to the model so it can emit tool calls.
tools = [add, multify, divide]
tools_byname = {tool.name: tool for tool in tools}
model_with_tools = model.bind_tools(tools)

# Graph state shared by all nodes.
class State(TypedDict):
    # Chat history; the add_messages reducer merges new messages by id
    # instead of blindly concatenating lists (the operator.add approach).
    messages: Annotated[list[AnyMessage], add_messages]
    # Number of model invocations. Renamed from `model_calls` to `llm_calls`
    # so the schema matches the "llm_calls" key that llm_call() reads/writes.
    llm_calls: int

# Model node
def llm_call(state: State):
    """Invoke the tool-bound model; the LLM decides whether to call a tool."""
    system = SystemMessage(content="你是个有用的智能助手")
    response = model_with_tools.invoke([system, *state["messages"]])
    return {
        "messages": [response],
        "llm_calls": state.get('llm_calls', 0) + 1,
    }

# Tool node
def tool_node(state: State):
    """Execute every tool call requested by the last (AI) message.

    Returns a state update with one ToolMessage per executed call, so the
    observations are appended to the history via the add_messages reducer.
    """
    result = []
    for tool_call in state["messages"][-1].tool_calls:
        tool = tools_byname[tool_call["name"]]
        observation = tool.invoke(tool_call["args"])
        # str(): these tools return ints/floats, but ToolMessage.content
        # expects a string (or a list of content blocks), not a bare number.
        result.append(ToolMessage(content=str(observation), tool_call_id=tool_call["id"]))

    return {"messages": result}


# Termination logic
def should_continue(state: State) -> Literal["tool_node", END]:
    """Route to the tool node while the last message requests tool calls; otherwise stop."""
    last_message = state["messages"][-1]
    return "tool_node" if last_message.tool_calls else END


def main():
    """Wire up the agent graph, visualize it, and run one sample query."""
    print("Langgraph quick start.")

    # Assemble the graph: model node + tool node, looping until no tool calls.
    builder = StateGraph(State)
    builder.add_node("llm_call", llm_call)
    builder.add_node("tool_node", tool_node)

    builder.add_edge(START, "llm_call")
    builder.add_conditional_edges("llm_call", should_continue, ["tool_node", END])
    builder.add_edge("tool_node", "llm_call")

    graph = builder.compile()
    logger.info(f"图编译成功")

    # Save a visualization of the compiled graph.
    save_graph_visualization(graph)

    # Run one round and pretty-print the whole transcript.
    result = graph.invoke({"messages": [HumanMessage(content="3加4")]})
    for msg in result["messages"]:
        msg.pretty_print()


if __name__ == "__main__":
    main()
