import os
import dotenv
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.documents import Document
from langchain.chains.retrieval import create_retrieval_chain
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain_core.prompts import MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage
from langchain.tools.retriever import create_retriever_tool

from langchain_openai import ChatOpenAI
from langchain import hub
from langchain.agents import create_openai_functions_agent
from langchain.agents import AgentExecutor


# Load environment variables from a local .env file so the OpenAI
# credentials / endpoint below are not hard-coded in the source.
dotenv.load_dotenv()
openai_url = os.getenv('OPENAI_API_BASE')
api_key = os.getenv('OPENAI_API_KEY')

# Chat model client used throughout the script; points at the base URL
# and key resolved above (both may be None if the .env is incomplete —
# ChatOpenAI will then fail at construction/call time).
llm = ChatOpenAI(openai_api_key=api_key,openai_api_base=openai_url)

# prompt = ChatPromptTemplate.from_messages([
#     ("system", "你是一个高级工程师。"),
#     ("user", "{input}")
# ])

# # Output parser: converts the chat message result into a plain string.
output_parser = StrOutputParser()

# # Compose a simple LLM chain
# chain = prompt | llm | output_parser
# # Invoke the LLM chain
# res = chain.invoke({"input": "Langsmith 如何帮助进行测试?"})
# print(res)

# Fetch the LangSmith user guide page so it can be indexed for retrieval.
raw_docs = WebBaseLoader("https://docs.smith.langchain.com/user_guide").load()

# Split the page into chunks sized for embedding / the LLM context window.
chunks = RecursiveCharacterTextSplitter().split_documents(raw_docs)

# Embed each chunk and store the vectors in an in-memory FAISS index.
# `vector` is consumed further down to build the retriever tool.
vector = FAISS.from_documents(
    chunks,
    OpenAIEmbeddings(openai_api_key=api_key, openai_api_base=openai_url),
)


# RAG prompt: the retrieved documents are injected as {context}, the user
# question as {input}. The template text itself is user-facing (Chinese)
# and is part of runtime behavior, so it is kept verbatim.
prompt = ChatPromptTemplate.from_template("""仅根据提供的上下文回答以下问题:

<context>
{context}
</context>

Question: {input}""")

# Build a "stuff" documents chain: it formats the whole list of documents
# into the prompt above and passes that single prompt to the LLM. Since
# every document is included verbatim, the combined text must fit inside
# the LLM's context window.
document_chain = create_stuff_documents_chain(llm, prompt)

# text ="langsmith can let you visualize test results"
# res = document_chain.invoke({
#     "input": "Langsmith 如何帮助进行测试?",
#     "context": [Document(page_content=text)]
# })
# print(res)

# Create a retriever over the vector store
# retriever = vector.as_retriever()
# # Build a chain that takes the user query, passes it to the retriever to
# # fetch relevant documents, then feeds those documents (plus the original
# # input) to the LLM to generate the response.
# retrieval_chain = create_retrieval_chain(retriever, document_chain)
# # Run the retrieval — this returns a dict
# response = retrieval_chain.invoke({"input": "how can langsmith help with testing?"})
# print(response["answer"])


# prompt = ChatPromptTemplate.from_messages([
#     MessagesPlaceholder(variable_name="chat_history"),
#     ("user", "{input}"),
#     ("user", "鉴于上述对话，生成一个搜索查询以查找以获取与对话相关的信息")
# ])
# retriever_chain = create_history_aware_retriever(llm, retriever, prompt)

# chat_history = [HumanMessage(content="LangSmith 可以帮助测试我的 LLM 应用程序吗?"), AIMessage(content="Yes!")]
# res = retriever_chain.invoke({
#     "chat_history": chat_history,
#     "input": "告诉我怎么做"
# })

# print(res)

# prompt = ChatPromptTemplate.from_messages([
#   ("system", "根据以下上下文回答用户的问题:\n\n{context}"),
#     MessagesPlaceholder(variable_name="chat_history"),
#     ("user", "{input}"),])

# document_chain = create_stuff_documents_chain(llm, prompt)
# retrieval_chain = create_retrieval_chain(retriever_chain, document_chain)

# Test
# chat_history = [HumanMessage(content="LangSmith 可以帮助测试我的 LLM 应用程序吗?"), AIMessage(content="Yes!")]
# retrieval_chain.invoke({
#     "chat_history": chat_history,
#     "input": "Tell me how"
# })

# Expose the FAISS index as a retriever and wrap it as an agent tool.
# The tool name and description are prompt-facing text the LLM reads when
# deciding whether to call the tool; both are kept verbatim.
langsmith_tool = create_retriever_tool(
    vector.as_retriever(),
    "langsmith_search",
    "搜索有关 LangSmith 的信息。对于有关LangSmith的任何问题，您必须使用此工具!",
)

# Tool list handed to the agent executor below.
tools = [langsmith_tool]


# Pull a ready-made OpenAI-functions agent prompt from the LangChain hub
# (this prompt can be customised if needed).
prompt = hub.pull("hwchase17/openai-functions-agent")

# Reuse the chat model configured at the top of the file. The original
# code re-created an identical ChatOpenAI client here, which was redundant.

# Create an agent that lets the LLM decide, via the OpenAI function-calling
# API, when to invoke the retriever tool.
agent = create_openai_functions_agent(llm, tools, prompt)

# Executor that drives the agent loop (LLM <-> tool calls);
# verbose=True logs each intermediate step to stdout.
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

# Smoke test: a question that should trigger the langsmith_search tool.
# Capture and print the answer instead of discarding the result, matching
# how the other examples in this file report their output.
response = agent_executor.invoke({"input": "Langsmith 如何帮助进行测试?"})
print(response["output"])