import json
from typing import Annotated

from langchain_core.messages import ToolMessage
from langchain_core.tools import tool
from langgraph.constants import START, END
from langgraph.graph import add_messages, StateGraph
from typing_extensions import TypedDict

from src.ai.langchain.init_llm import get_llm

# Shared chat-model instance used by the chatbot node below.
llm = get_llm()


@tool
def get_weather(city: str) -> str:
    """获取城市的天气情况"""
    # NOTE: the docstring above doubles as the tool description that langchain
    # sends to the LLM, so it is deliberately left as-is ("get the weather for
    # a city") — changing it would change what the model sees.
    # Stub implementation: always reports "sunny turning cloudy, low 28.5°C".
    return f"{city}天气晴转多云，最低气温 28.5℃"


# Tool list registered on both the LLM-facing side and the tool-executor node.
search_tools = [get_weather]


class BaseToolNode:
    """LangGraph node that executes the tool calls requested by the model.

    The node receives the graph state (``{"messages": [...]}``), reads the
    ``tool_calls`` attached to the most recent message (the AIMessage the
    model just produced), invokes the matching tools, and returns the results
    wrapped as ``ToolMessage`` objects.
    """

    def __init__(self, tools: list) -> None:
        # Index tools by name for O(1) dispatch when resolving tool calls.
        self.tools_map = {t.name: t for t in tools}

    def __call__(self, inputs: dict) -> dict:
        """Run every tool call found on the last message of ``inputs``.

        Args:
            inputs: graph state dict with a ``"messages"`` list.

        Returns:
            ``{"messages": [ToolMessage, ...]}`` — one message per tool call.

        Raises:
            ValueError: if the state has no messages, or a tool call names a
                tool that was not registered on this node.
        """
        messages = inputs.get("messages", [])
        if not messages:
            raise ValueError("No message found in input")
        # Only the last message matters: it carries the tool_calls produced
        # by the model on this turn.
        message = messages[-1]
        outputs = []
        for tool_call in message.tool_calls:
            tool = self.tools_map.get(tool_call["name"])
            if tool is None:
                # Explicit error instead of an opaque KeyError.
                raise ValueError(f"Unknown tool requested: {tool_call['name']}")
            tool_result = tool.invoke(tool_call["args"])
            outputs.append(
                ToolMessage(
                    tool_call_id=tool_call["id"],
                    name=tool_call["name"],
                    # ensure_ascii=False keeps non-ASCII text (e.g. Chinese
                    # weather strings) readable instead of \uXXXX escapes.
                    content=json.dumps(tool_result, ensure_ascii=False),
                )
            )
        return {"messages": outputs}


class State(TypedDict):
    # Conversation history. The add_messages reducer appends incoming
    # messages to the list instead of overwriting it on each state update.
    messages: Annotated[list, add_messages]


def chatbot(state: State):
    """LLM node: run the accumulated messages through the model and append its reply."""
    reply = llm.invoke(state["messages"])
    return {"messages": [reply]}


def route_tools(state: State):
    if isinstance(state, list):
        ai_message = state[-1]
    elif messages := state.get("messages", []):
        ai_message = messages[-1]
    else:
        raise ValueError(f"No messages found{state}")
    if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
        return "tools"
    return END


# ---- Graph wiring --------------------------------------------------------
graph_builder = StateGraph(State)

tool_node = BaseToolNode(search_tools)

# Two nodes: the LLM ("chatbot") and the tool executor ("tools").
graph_builder.add_node("tools", tool_node)
graph_builder.add_node("chatbot", chatbot)

# Every run starts at the chatbot node.
graph_builder.add_edge(START, "chatbot")
# After the chatbot: run tools if the model asked for them, otherwise finish.
graph_builder.add_conditional_edges("chatbot", route_tools, {"tools": "tools", END: END})
# Feed tool results back to the model so it can produce a final answer;
# without this edge the "tools" node is a dead end and the run would stop
# with the raw ToolMessage as the last message.
graph_builder.add_edge("tools", "chatbot")

graph = graph_builder.compile()

# 生成图
# png = graph.get_graph().draw_mermaid_png()
#
# with open("langgraph_chatbot.png", "wb") as f:
#     f.write(png)

# Module-level conversation log shared across turns of the REPL below.
history = []


def stream_graph_updates(user_input: str, history: list):
    """Send one user turn through the graph and record both sides of the exchange.

    Args:
        user_input: the user's message for this turn.
        history: mutable conversation log; extended in place with the user
            message and then the assistant's reply.
    """
    # Mutate the caller's list (append, not rebind) so the module-level
    # history keeps growing between turns.
    history.append({"role": "human", "content": user_input})

    print("完成的请求消息：", history)

    result = graph.invoke({"messages": history})

    # The last message in the final state is the assistant's answer.
    result_content = result["messages"][-1].content

    print("智能助手：", result_content)

    history.append({"role": "assistant", "content": result_content})


# Simple REPL: read a line, run it through the graph, repeat until "quit".
while True:
    try:
        user_input = input("请输入: ")
        if user_input.lower() in ["quit", "exit", "q"]:
            print("Goodbye!")
            break
        stream_graph_updates(user_input, history)
    except (EOFError, KeyboardInterrupt):
        # input() is unavailable (no TTY / stream closed) or the user hit
        # Ctrl-C: run one canned query and exit. A bare `except:` here would
        # also swallow real errors from the graph call — keep it narrow.
        user_input = "What do you know about LangGraph?"
        print("异常，用户输入内容为: " + user_input)
        stream_graph_updates(user_input, history)
        break
