# tavily搜索工具的使用以及代理agent

import os

from langchain.agents import Agent, ZeroShotAgent, AgentExecutor
from langchain.chains.llm import LLMChain
from langchain_community.tools import TavilySearchResults
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.prompts import PromptTemplate
from langchain_deepseek import ChatDeepSeek
from langgraph.prebuilt import chat_agent_executor

# Enable LangSmith tracing.
# FIX: the variable LangChain actually reads is LANGCHAIN_TRACING_V2 — the
# original "LANGCHAIN_TRACING_V3" is not recognised, so tracing was silently off.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# SECURITY: API keys are hard-coded here for demo purposes only — never commit
# real secrets; load them from the environment or a secrets manager instead.
# `setdefault` lets an externally supplied value take precedence over the demo key.
os.environ.setdefault("LANGCHAIN_API_KEY", "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")
os.environ.setdefault("LANGCHAIN_PROJECT", "langchain-community-demo")
os.environ.setdefault("TAVILY_API_KEY", "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")

# DeepSeek API key; an externally supplied DEEPSEEK_API_KEY wins over the demo value.
deepseek_api_key = os.environ.get("DEEPSEEK_API_KEY", "sk-1dd16a258a73428d910d38c782e1c94f")

# 1. Create the chat model (DeepSeek via the official LangChain integration).
# Switch model to "deepseek-reasoner" to use DeepSeek-R1 instead.
_model_config = {
    "api_key": deepseek_api_key,
    "model": "deepseek-chat",
    "temperature": 0.7,
    "max_tokens": 1024,
}
model = ChatDeepSeek(**_model_config)

# LangChain ships a built-in Tavily search-engine tool; the agent below uses it
# as its only tool.
tools = [TavilySearchResults(max_results=2)]
search = tools[0]
# Manually invoke the tool to get an answer (below, the LLM calls it automatically):
# result = search.invoke("今天山东济南的天气怎么样？")
# print(result)

# Binding the tools to the model directly (alternative to the agent approach):
# model_with_tools = model.bind_tools(tools)

# The model can then decide on its own whether a tool call is needed to answer:
# resp = model_with_tools.invoke([HumanMessage(content="中国的首都是哪个城市？")])
# print(f'model tools resp : {resp}')
# print(f'model tools resp Calls : {resp.tool_calls}')
#
# resp1 = model_with_tools.invoke("济南天气怎么样？")
# print(f'model tools resp1 : {resp1}')
# print(f'model tools resp1 Calls : {resp1.tool_calls}')

# Define the ReAct-style prompt template.
# FIX: the original template declared {tool_name}/{tool_input}/{observation}/{answer}
# as input_variables, so any invoke() supplying only "input" would fail with a
# missing-variable error.  In a ReAct prompt those slots are produced by the agent
# loop itself and injected via {agent_scratchpad}; the caller only supplies {input}.
# Also fixed: a stray '"' after "Begin!" in the original suffix.
prefix = """Answer the following questions as best you can, but think step-by-step and justify your answer. You have 
access to the following tools:"""
suffix = """Begin!

Question: {input}
Thought: {agent_scratchpad}"""
prompt = PromptTemplate(
    input_variables=["input", "agent_scratchpad"],
    template=prefix + "\n\n" + suffix,
)

# 2. Build and run the agent (classic LangChain agent; LangGraph variant below).
# NOTE(review): llm_chain is composed here but never used — ZeroShotAgent
# builds its own internal prompt/chain; kept only to demonstrate LCEL piping.
# llm_chain = LLMChain(llm=model, prompt=prompt)
llm_chain = prompt | model
agent = ZeroShotAgent.from_llm_and_tools(llm=model, tools=tools)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
# The 'input' key must match the variable defined in the prompt above.
# FIX: the agent's {input} slot is plain text, so pass the question string
# directly — wrapping it in a [HumanMessage] list would just get str()-ified
# into the prompt.
result = agent_executor.invoke({'input': "今日NBA快船和骑士谁赢了，最终比分是多少?"})
print(result)
# Equivalent LangGraph approach:
# agent_executor = chat_agent_executor.create_tool_calling_executor(model, tools)
# resp = agent_executor.invoke({'messages': [HumanMessage(content="中国的首都是哪个城市？")]})
# print(resp)
#
# if resp.tool_calls:
#     tool_messages = []
#     for tool_call in resp.tool_calls:
#         # Assumes the tool result can be fetched directly; in practice dispatch on tool_call's content.
#         tool_result = search.invoke(tool_call.tool_input)
#         tool_messages.append(AIMessage(content=str(tool_result), additional_kwargs={'tool_call_id': tool_call.id}))
#
#     # Second invocation, feeding the tool results back to the model.
#     final_resp = agent_executor.invoke({'messages': tool_messages})
#     print(final_resp)
# else:
#     print(resp)
