from langchain import hub
from langchain.agents import create_react_agent, AgentExecutor
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.tools import TavilySearchResults
from langchain_community.vectorstores import FAISS
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.tools import create_retriever_tool
from langchain_ollama import OllamaLLM
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Local chat model served by Ollama.
llm = OllamaLLM(model="llama3.2:3b")

# Load the Baidu Baike article about cats ("猫") as the knowledge source.
loader = WebBaseLoader("https://baike.baidu.com/item/%E7%8C%AB/22261")
docs = loader.load()

# Split the page into overlapping chunks so each chunk fits comfortably in the
# embedding model's context window while preserving continuity at boundaries.
documents = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=200
).split_documents(docs)

# Embed the chunks with a local Ollama embedding model and index them in FAISS.
embeddings = OllamaEmbeddings(model="nomic-embed-text")
vector = FAISS.from_documents(documents, embeddings)

# Expose the index as a similarity-search retriever for the agent tool below.
retriever = vector.as_retriever()

# Wrap the retriever as an agent tool. The description must accurately state
# what the tool covers, because the agent picks tools based on it: the original
# said "搜索维基百科" (search Wikipedia) even though the index actually holds a
# Baidu Baike article about cats.
retriever_tool = create_retriever_tool(
    retriever,
    "baidu_search",
    "搜索百度百科中关于猫的信息"
)

# Tavily web search for questions the local index cannot answer (e.g. weather).
search = TavilySearchResults(max_results=1)

# Tool belt handed to the agent.
tools = [search, retriever_tool]

# Pull a ReAct prompt that includes a {chat_history} slot. The plain
# "hwchase17/react" template only has {input}/{agent_scratchpad}, so the
# chat_history passed at invoke() time would be silently dropped and the agent
# could not answer follow-up questions that depend on prior turns.
prompt = hub.pull("hwchase17/react-chat")

# Create the ReAct agent from the local LLM, the tools, and the prompt.
agent = create_react_agent(llm, tools, prompt)

# Executor that runs the think/act/observe loop. handle_parsing_errors lets the
# loop recover when a small local model emits malformed ReAct output instead of
# crashing the whole run.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
)

# Seed the conversation with a prior exchange, then ask a follow-up question
# that can only be answered from that history.
history = [
    HumanMessage(content="Hi,我的名字是Cyber"),
    AIMessage(content="你好，Cyber,很高兴见到你！有什么可以帮助你的吗"),
]

result = agent_executor.invoke(
    {
        "chat_history": history,
        "input": "我的名字是什么",
    }
)

print(result["output"])
