# Install dependency: pip install langchain_community
import os

from langchain_openai import AzureChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_community.tools import TavilySearchResults
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langgraph.prebuilt import chat_agent_executor

# --- Environment configuration ---
# SECURITY NOTE(review): API keys are hard-coded in source. Move them to real
# environment variables / a secrets manager and rotate these keys before
# sharing or deploying this file.

# LangSmith tracing (sends run traces to LangSmith).
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "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"

# Azure OpenAI connection settings (endpoint appears to be an internal proxy).
os.environ["AZURE_OPENAI_ENDPOINT"] = "http://menshen.test.xdf.cn"
# os.environ["OPENAI_API_BASE"] = "http://menshen.test.xdf.cn"
os.environ["OPENAI_API_KEY"] = "c8575027653b42b1b47747f0b4ab135b"
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-05-15"

# Tavily web-search API key (used by TavilySearchResults below).
os.environ["TAVILY_API_KEY"] = "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"

# Azure-hosted GPT-4o chat model; temperature=0 for deterministic replies.
llm = AzureChatOpenAI(
    model_name="gpt-4o",
    deployment_name="gpt-4o",
    temperature=0,
)

# LangChain ships a built-in wrapper around the Tavily web-search API.
# max_results=1 keeps only the top hit per query.
search_tool = TavilySearchResults(max_results=1)
# print(search_tool.invoke("今天北京的天气怎么样"))

# Bind the tool to the model
# llm_with_tool = llm.bind_tools([search_tool])
#
# With the tool bound, the model answers directly when it can; otherwise it
# emits a tool call instead of a final answer.
# resp1 = llm_with_tool.invoke("中国的首都是哪个城市")
# print(f'module_result_content: {resp1.content}')
# print(f'tool_result_content: {resp1.tool_calls}')
#
# resp2 = llm_with_tool.invoke("今天北京的天气怎么样")
# print(f'module_result_content: {resp2.content}')
# print(f'tool_result_content: {resp2.tool_calls}')

# Create a prebuilt tool-calling agent: per query, the model either answers
# directly or calls the Tavily search tool and incorporates the result.
agent_executor = chat_agent_executor.create_tool_calling_executor(llm, [search_tool])

# The agent's input schema expects "messages" to be a LIST of messages
# ({"messages": [...]}); passing a bare message relies on implicit coercion.
# Answerable from model knowledge — no tool call expected.
resp = agent_executor.invoke({"messages": [HumanMessage(content="中国的首都是哪个城市")]})
print(resp["messages"])

# Requires current information — the agent should call the search tool.
resp = agent_executor.invoke({"messages": [HumanMessage(content="今天北京的天气怎么样")]})
print(resp["messages"])


