import os 
import json

from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables import RunnableWithMessageHistory, RunnableLambda, RunnablePassthrough
from langchain_core.documents import Document
from langchain_chroma import Chroma
from langchain_community.tools.tavily_search import TavilySearchResults
from langgraph.prebuilt import chat_agent_executor

# --- Environment configuration ---
# SECURITY(review): live API keys are hardcoded in source — anyone with repo
# access can read them. They should be rotated and supplied via the real
# environment (e.g. a .env file) rather than committed. `setdefault` is used
# so values already present in the environment take precedence.
# BUG FIX: the original set "LANGSMITH_TRACING_V2", which LangSmith does not
# read — the recognized flags are LANGCHAIN_TRACING_V2 (legacy) and
# LANGSMITH_TRACING (current); both are set here so tracing actually turns on.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGSMITH_TRACING", "true")
os.environ.setdefault("LANGSMITH_API_KEY", "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")
os.environ.setdefault("OPENAI_BASE_URL", "https://api.chatanywhere.tech/v1")
os.environ.setdefault("OPENAI_API_KEY", "sk-pbXvhNj37SZ5SUBzC1Kx4LeXrsnT9EJNDL6mT2Lj2IbgohKa")
os.environ.setdefault("TAVILY_API_KEY", "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")

# --- Model, tool, and agent construction ---

# Chat model; reads OPENAI_API_KEY / OPENAI_BASE_URL from the environment.
model = ChatOpenAI(model="gpt-4o-mini")

# Tavily web-search tool; max_results=1 keeps only the top hit, limiting
# how much search text enters the agent's context window.
search = TavilySearchResults(max_results=1)

# Tool-calling agent: the model reasons per query whether to call the
# search tool (e.g. for live data like weather) or answer directly.
# NOTE(review): `chat_agent_executor.create_tool_calling_executor` is a
# deprecated alias in recent langgraph releases; `create_react_agent` from
# `langgraph.prebuilt` is the current entry point — verify against the
# pinned langgraph version before switching.
agent_executor = chat_agent_executor.create_tool_calling_executor(model, [search])


# --- Run the agent and persist the transcript ---
# Ask a question that needs live data ("How is the weather in Beijing?"),
# which should make the agent call the search tool.
resp_2 = agent_executor.invoke(
    {"messages": [HumanMessage(content="北京的天气怎么样？")]}
)
print(resp_2)

# Serialize the message list to 001.json. Building the dict and calling
# json.dump replaces the original's fragile hand-assembled
# '{"messages": ' + ... + '}' string concatenation. `default=str`
# stringifies the LangChain message objects, which are not natively
# JSON-serializable.
with open("001.json", "w", encoding="utf-8") as f:
    json.dump(
        {"messages": resp_2.get("messages")},
        f,
        indent=4,
        ensure_ascii=False,
        default=str,
    )