import os
from typing import TypedDict, Annotated, Sequence

from dotenv import load_dotenv
from langchain.chat_models import init_chat_model
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from langgraph.graph import StateGraph, END
from langgraph.graph import add_messages
from langgraph.prebuilt import ToolNode

from langchain_weather import get_weather
from tool_call import write_file


class AgentState(TypedDict):
    """Shared graph state: the running conversation message list."""

    # `add_messages` is a reducer: values returned by nodes are appended /
    # merged into the existing sequence rather than replacing it.
    messages: Annotated[Sequence[BaseMessage], add_messages]


# Load variables from a local .env file, overriding any values already
# present in the process environment.
load_dotenv(override=True)
# NOTE(review): this constant is not referenced elsewhere in this file —
# presumably the DeepSeek provider reads the key from the environment on
# its own; confirm before removing.
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
# Tavily web-search tool, capped at 2 results per query.
search = TavilySearchResults(max_results=2)
# Tools the agent may call: weather lookup, file writing, web search.
tools = [get_weather, write_file, search]
# Prebuilt LangGraph node that executes any tool calls found in the latest
# AI message and appends the resulting tool messages to state.
tool_node = ToolNode(tools)


# LLM node: prepend the system prompt and invoke the tool-bound model.
def call_llm(state):
    """Invoke the DeepSeek chat model on the conversation in *state*.

    Args:
        state: AgentState-shaped dict with a "messages" sequence.

    Returns:
        dict: ``{"messages": [response]}`` — the model's reply, merged into
        graph state by the ``add_messages`` reducer.
    """
    # The system prompt is prepended per call only; it is never written back
    # into graph state, so it does not accumulate across turns.
    messages = [{"role": "system", "content": "你是一个中文智能小助手。"}] + list(
        state["messages"]
    )
    # Build the tool-bound model once and cache it on the function object:
    # the original re-created the client on every graph step, which is
    # wasted work on each llm -> tools -> llm round trip.
    model = getattr(call_llm, "_model", None)
    if model is None:
        model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
        model = model.bind_tools(tools)
        call_llm._model = model
    response = model.invoke(messages)
    return {"messages": [response]}


# Router helper: decide whether the llm node's output needs a tool round-trip.
def should_continue(state):
    """Return "continue" if the last message requests tool calls, else "end"."""
    last_message = state["messages"][-1]
    # A non-empty `tool_calls` list means the model wants a tool executed
    # before it can produce its final answer.
    return "continue" if last_message.tool_calls else "end"


# Define the graph over the shared AgentState.
workflow = StateGraph(AgentState)

# Add the two nodes: the model call and the tool executor.
workflow.add_node("llm", call_llm)
workflow.add_node("tools", tool_node)

# Execution always starts at the llm node.
workflow.set_entry_point("llm")

# Conditional edge out of "llm": should_continue routes to "tools" when the
# model requested a tool call, otherwise the run finishes.
workflow.add_conditional_edges(
    "llm",
    should_continue,
    {
        "continue": "tools",
        "end": END,
    },
)

# After tools run, hand their results back to the model.
workflow.add_edge("tools", "llm")

# Compile the wiring into a runnable graph.
graph = workflow.compile()

# Local smoke-test REPL: read a line, run the agent, print the final reply.
if __name__ == "__main__":
    exit_words = ("quit", "exit", "q")
    while True:
        user_input = input("User: ")
        print("User: " + user_input)
        if user_input.lower() in exit_words:
            print("Goodbye!")
            break

        # Each turn invokes the graph with a fresh state — no cross-turn
        # conversation memory is kept in this test loop.
        result = graph.invoke({"messages": [("user", user_input)]})
        print(result["messages"][-1].content)
