import os 
import requests
from bs4 import BeautifulSoup, SoupStrainer

from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain

from langchain_community.document_loaders import WebBaseLoader
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables import RunnableWithMessageHistory, RunnableLambda, RunnablePassthrough
from langchain_core.documents import Document
from langchain_chroma import Chroma
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_text_splitters import RecursiveCharacterTextSplitter


# --- Proxy & credential configuration -------------------------------------
# Route HTTP(S) traffic through a local proxy (e.g. Clash on port 7890).
# The scheme prefix is required so `requests` parses the proxy URL reliably;
# a bare host:port is ambiguous.
os.environ['http_proxy'] = 'http://127.0.0.1:7890'
os.environ['https_proxy'] = 'http://127.0.0.1:7890'

# Enable LangSmith tracing. The variable LangChain has historically read is
# LANGCHAIN_TRACING_V2; newer langsmith clients also accept the LANGSMITH_
# prefix — set both so tracing works regardless of library version.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGSMITH_TRACING_V2"] = "true"

# SECURITY: hard-coded credentials checked into source control are
# effectively leaked — rotate these keys and supply them via the real
# environment (or a .env file). `setdefault` keeps the script runnable
# as before while letting an externally provided value take precedence.
os.environ.setdefault("LANGSMITH_API_KEY", "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")
os.environ.setdefault("OPENAI_BASE_URL", "https://api.chatanywhere.tech/v1")
os.environ.setdefault("OPENAI_API_KEY", "sk-pbXvhNj37SZ5SUBzC1Kx4LeXrsnT9EJNDL6mT2Lj2IbgohKa")
os.environ.setdefault("TAVILY_API_KEY", "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")

# HTTP session with a desktop-browser User-Agent so the target site does not
# reject the default python-requests signature.
session = requests.Session()
session.headers['User-Agent'] = (
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
    '(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
)

# Chat model used for answer generation.
model = ChatOpenAI(model="gpt-4o-mini")

# Load the source article. SoupStrainer restricts parsing to elements with
# the listed CSS classes (post header/title/content), skipping nav, footer,
# and other boilerplate.
loader = WebBaseLoader(
    web_path=['https://lilianweng.github.io/posts/2023-06-23-agent/'],
    session=session,  # reuse the session carrying the browser User-Agent
    bs_kwargs=dict(
        parse_only=SoupStrainer(class_=('post-header', 'post-title', 'post-content'))
    )
)
docs = loader.load()  # -> list of Document objects, one per fetched page

# Split the text into ~1000-character chunks with 200 characters of overlap
# so context is preserved across chunk boundaries.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
# Apply the splitter to the loaded documents.
splits = splitter.split_documents(docs)
# 2. Store: embed each chunk with OpenAI embeddings into an in-memory
#    Chroma vector store.
vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())

# 3. Retriever over the vector store (default similarity search settings).
retriever = vectorstore.as_retriever()

# Prompt for the QA step: the system message carries the retrieved context
# (filled into {context} by the stuff-documents chain), the human message
# carries the user's question ({input}).
system_prompt = """You are an assistant for question-answering tasks. 
Use the following pieces of retrieved context to answer 
the question. If you don't know the answer, say that you 
don't know. Use three sentences maximum and keep the answer concise.\n

{context}
"""
qa_messages = [
    ("system", system_prompt),
    ("human", "{input}"),
]
prompt = ChatPromptTemplate.from_messages(qa_messages)

# Assemble the RAG chain: retrieve relevant chunks, then "stuff" them into
# the prompt's {context} slot and let the model generate the answer.
chain = create_retrieval_chain(retriever, create_stuff_documents_chain(model, prompt))
# Ask the question (Chinese: "Please explain what Self-Reflection is?").
resp = chain.invoke({"input": "请介绍一个什么是 Self-Reflection?"})
print(resp['answer'])
