"""
加载web网页数据
"""

import bs4
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter

from models import get_ds_model_client, get_ollama_embeddings_client

# Fetch the target government web page, keeping only the DOM element with
# id "UCAP-CONTENT" (the article body), then cut the loaded documents into
# 1000-character chunks with a 50-character overlap for downstream retrieval.
page_loader = WebBaseLoader(
    web_path="https://www.gov.cn/xinwen/2020-06/01/content_5516649.htm",
    bs_kwargs={"parse_only": bs4.SoupStrainer(id=("UCAP-CONTENT"))},
)
docs = page_loader.load()

text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
documents = text_splitter.split_documents(docs)


# Query the LLM directly: stuff the first five document chunks into the
# prompt's {context} slot and ask the question via a stuff-documents chain.
llm = get_ds_model_client()
qa_prompt = ChatPromptTemplate.from_messages([
    ("system", "根据提供的上下文: {context} \n\n 回答问题"),
    ("user", "问题：{input}"),  # the user's question
])
stuff_chain = create_stuff_documents_chain(llm, qa_prompt)
answer = stuff_chain.invoke({"context": documents[:5], "input": "民事法律行为？"})
print("调用大模型搜索结果为：", answer)

# Vector search over the same five chunks: embed them into an in-memory
# Chroma store and retrieve the top-2 chunks most similar to the question.
embeddings_client = get_ollama_embeddings_client()
vector_store = Chroma.from_documents(documents[:5], embeddings_client)
# Bind the hits to a fresh name instead of rebinding `docs`, which would
# shadow the raw loader output defined earlier in the script.
results = vector_store.similarity_search("民事法律行为？", k=2)
print("向量搜索结果为：")
for doc in results:
    # The original printed `doc.page_content[:]` — a no-op full slice that
    # just copies the string; print the attribute directly.
    print(f' {doc.page_content}\n')
