from langchain_chroma import Chroma
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory, RunnableLambda, RunnablePassthrough
from langchain_ollama import OllamaEmbeddings
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_core.documents import Document
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_community.tools.tavily_search import TavilySearchResults
from langgraph.prebuilt import chat_agent_executor

# Chat model served via an OpenAI-compatible endpoint (Ollama); the api_key is a
# placeholder — Ollama ignores it but the client requires a non-empty value.
model = ChatOpenAI(model="ictrek/qwen7b:32k",
                   openai_api_key="ollama",
                   openai_api_base="http://10.2.4.31:11434/v1/")

# LangChain ships a built-in tool wrapping the Tavily search engine.
# Additional options (kept for reference):
# tool = TavilySearchResults(
#     max_results=5,  # maximum number of results returned
#     include_answer=True,
#     include_raw_content=True,
#     include_images=True,
#     # search_depth="advanced",
#     # include_domains = []
#     # exclude_domains = []
# )
search = TavilySearchResults(max_results=2)

# Bind the tool so the model can emit tool calls; this invoke only produces a
# tool-call request — it does NOT execute the search itself.
model_with_tool = model.bind_tools([search])
resp = model_with_tool.invoke("北京的天气怎么样？")

# The agent executor runs the full loop: model -> tool call -> tool result -> answer.
agent_executor = chat_agent_executor.create_tool_calling_executor(model, [search])
# BUG FIX: the LangGraph prebuilt agent's state key is "messages" (plural);
# using "message" meant the input was never passed and the result lookup failed.
resp = agent_executor.invoke({"messages": [HumanMessage("北京天气怎么样？")]})
print(resp["messages"])