"""Index a web article in Chroma and query it with a ParentDocumentRetriever.

Requires: langchain, langchain-chroma, and the project-local
``qwen_embedding`` module providing :class:`QwenEmbedding`.
"""

from langchain.document_loaders import WebBaseLoader
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma

from qwen_embedding import QwenEmbedding

# Fetch the source article (Baidu Baike entry on dinosaurs).
url = "https://baike.baidu.com/item/恐龙/139019"
loader = WebBaseLoader(url)
docs = loader.load()

# Child splitter: the retriever uses this to cut each parent document into
# small chunks, which are embedded and stored in the vector store.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,   # chunk size; tune as needed
    chunk_overlap=20,  # overlap between adjacent chunks; tune as needed
)

# Vector store backed by an on-disk Chroma collection.
# NOTE(review): persist_directory="db" survives between runs, and the
# add_documents call below runs unconditionally — re-running this script
# re-indexes the same chunks. Clear "db" or guard the add if that matters.
qwen_embedding = QwenEmbedding()
vectorstore = Chroma(
    collection_name="full_documents",
    persist_directory="db",
    embedding_function=qwen_embedding,
)

# In-memory store holding the full parent documents, keyed by id.
docstore = InMemoryStore()

# The retriever searches over the embedded child chunks, then returns the
# corresponding full parent documents from the docstore.
retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=docstore,
    search_type="similarity",      # similarity search over child chunks
    search_kwargs={"k": 4},        # return the 4 most relevant results
    child_splitter=text_splitter,
)

# Index the raw documents. The retriever splits them internally with
# child_splitter, so no manual pre-splitting is needed (the previous
# standalone split_documents call was dead work and has been removed).
retriever.add_documents(docs, ids=None)

related_docs = retriever.invoke('恐龙生活在什么年代')

# Print the retrieved parent documents.
for doc in related_docs:
    print(f"Page content: {doc.page_content}")
    print(f"Metadata: {doc.metadata}")