"""Environment configuration for the supervisor-agent demo.

All credentials are loaded from the process environment (optionally via a
.env file) instead of being hard-coded in source.
"""
import os

from dotenv import find_dotenv, load_dotenv

# Pull TAVILY_API_KEY / LANGCHAIN_* / OPENAI_* settings from a .env file, if any.
load_dotenv(find_dotenv())

# SECURITY: the Tavily and LangSmith keys previously hard-coded here were
# committed to source control and must be rotated. Keys now come from the
# environment only; langchain tooling reads them from os.environ directly.
TAVILY_API_KEY = os.environ["TAVILY_API_KEY"]

# LangSmith tracing settings (optional; tracing is simply off when unset).
LANGCHAIN_ENDPOINT = os.getenv("LANGCHAIN_ENDPOINT", "https://api.smith.langchain.com")
LANGCHAIN_API_KEY = os.getenv("LANGCHAIN_API_KEY")

# Imports grouped per PEP 8 (stdlib, then third-party); the duplicated
# langchain_core.prompts import has been removed.
import functools
import operator
from typing import Annotated, Any, Dict, List, Optional, Sequence, TypedDict

from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_experimental.tools import PythonREPLTool
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END

# Web-search tool backed by Tavily (reads TAVILY_API_KEY from the environment).
tavily_tool = TavilySearchResults(max_results=5)

# Python REPL tool — NOTE: this executes arbitrary generated code.
python_repl_tool = PythonREPLTool()


# 代理构造工具
def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
    """Build an AgentExecutor wrapping an OpenAI-tools agent.

    Args:
        llm: Chat model that drives the agent.
        tools: Tools the agent is allowed to call.
        system_prompt: System message text describing the agent's role.

    Returns:
        An ``AgentExecutor`` ready to be invoked with a ``messages`` state.
    """
    chat_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            # Conversation history accumulated in the graph state.
            MessagesPlaceholder(variable_name="messages"),
            # Intermediate tool-call steps required by the OpenAI-tools agent.
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    openai_agent = create_openai_tools_agent(llm, tools, chat_prompt)
    return AgentExecutor(agent=openai_agent, tools=tools)


# 代理节点构造工具
def agent_node(state, agent, name):
    """Run *agent* on the graph state and wrap its output as a named message.

    Returns a partial-state dict whose single message is appended to the
    shared ``messages`` list by the graph (see ``AgentState``).
    """
    output_text = agent.invoke(state)["output"]
    return {"messages": [HumanMessage(content=output_text, name=name)]}


# --- Supervisor node -------------------------------------------------------
# Worker node names the supervisor can route to.
members = ["Researcher", "Coder"]
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers:  {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)
# The supervisor is an LLM node: it picks the next agent to run, or decides
# the work is complete by answering FINISH.
options = ["FINISH"] + members
# OpenAI function-calling schema that constrains the model's answer to one of
# the routing options, which makes output parsing trivial.
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {
            "next": {
                "title": "Next",
                "anyOf": [
                    {"enum": options},
                ],
            }
        },
        "required": ["next"],
    },
}
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        # Full conversation history accumulated in the graph state.
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))

# LLM endpoint/model are environment-overridable; the defaults preserve the
# original local Ollama deployment so behavior is unchanged when unset.
llm = ChatOpenAI(
    api_key=os.getenv("OPENAI_API_KEY", "ollama"),
    model=os.getenv("OPENAI_MODEL", "qwen2.5:7b"),
    base_url=os.getenv("OPENAI_API_BASE", "http://192.168.10.11:60026/v1"),
    temperature=0.7,
)

# NOTE(review): bind_functions / JsonOutputFunctionsParser use the legacy
# OpenAI "functions" API; consider migrating to bind_tools plus the
# corresponding tools output parser.
supervisor_chain = (
    prompt
    | llm.bind_functions(functions=[function_def], function_call="route")
    | JsonOutputFunctionsParser()
)

"""构建图的状态机"""


# Shared state object passed to every node in the graph.
class AgentState(TypedDict):
    """State flowing through the supervisor graph.

    messages: conversation history. The ``operator.add`` annotation tells
        LangGraph to append (concatenate) new messages returned by a node
        rather than overwrite the existing list.
    next: name of the node the supervisor routed to — one of the members
        or "FINISH".
    """

    messages: Annotated[Sequence[BaseMessage], operator.add]
    next: str


# Build the worker nodes of the graph.
research_agent = create_agent(
    llm, [tavily_tool], "You are a web researcher."
)  # Agent equipped with the Tavily web-search tool.
research_node = functools.partial(
    agent_node, agent=research_agent, name="Researcher"
)  # Bind agent + name so the node is a function of the state only.
# functools.partial performs partial application: it fixes some arguments of a
# function and returns a new function that accepts the remaining ones.

# NOTE: THIS PERFORMS ARBITRARY CODE EXECUTION. PROCEED WITH CAUTION.
code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

# --- Graph wiring ----------------------------------------------------------
workflow = StateGraph(AgentState)

# Register the two worker nodes plus the routing supervisor.
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("supervisor", supervisor_chain)

# Every worker reports back to the supervisor when it finishes.
for worker in members:
    workflow.add_edge(worker, "supervisor")

# The supervisor's routing decision is stored in the state's "next" field
# (see AgentState); each member routes to itself, FINISH maps to END.
route_table = dict(zip(members, members))
route_table["FINISH"] = END
workflow.add_conditional_edges(
    "supervisor",
    operator.itemgetter("next"),  # extract the routing decision from the state
    route_table,
)

# Execution begins at the supervisor, which dispatches the first worker.
workflow.set_entry_point("supervisor")
graph = workflow.compile()

# Run the graph on a single user request and print the final answer.
res = graph.invoke({"messages": [HumanMessage(content="LangGraph最新资讯")]})
print(res["messages"][-1].content)