"""Minimal RAG demo: fetch a web page, chunk and index it in FAISS with
Ollama embeddings, then answer a question through a RetrievalQA chain
backed by an Ollama-served chat model."""

from langchain.chains import RetrievalQA
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.chat_models import ChatOllama
from langchain_community.document_loaders import WebBaseLoader
# Fixed: `langchain.embeddings.ollama` is a deprecated path removed in
# langchain >= 0.2; the class lives in langchain_community (which this
# file already uses for its other imports).
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import FAISS

# Ollama server location — both the embedding model and the chat model
# are served from here (default Ollama port 11434).
host = "localhost"
ollama_base_url = f"http://{host}:11434"

# 1. Load the source document from the web.
loader = WebBaseLoader("https://blog.csdn.net/fanghailiang2016/article/details/140859711")
data = loader.load()

# 2. Split into ~500-character chunks (no overlap) for embedding/retrieval.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
splits = text_splitter.split_documents(data)

# 3. Embed the chunks and build an in-memory FAISS vector index.
embedding = OllamaEmbeddings(base_url=ollama_base_url, model="nomic-embed-text")
vectordb = FAISS.from_documents(documents=splits, embedding=embedding)

# 4. Chat model, used both to rephrase queries and to synthesize answers.
llm = ChatOllama(base_url=ollama_base_url, model="mistral")

# MultiQueryRetriever has the LLM generate several variants of the user
# query to improve recall over a single similarity search.
retriever_from_llm = MultiQueryRetriever.from_llm(retriever=vectordb.as_retriever(), llm=llm)

# 5. Build the RetrievalQA chain (retrieval + LLM answer synthesis).
qa_chain = RetrievalQA.from_chain_type(llm, retriever=retriever_from_llm)

# 6. Ask the question. `.invoke()` replaces the deprecated `chain(...)`
# (Chain.__call__) form; the result dict is unchanged.
question = "怎么安装 nomic-embed-text 文本嵌入模型？"
result = qa_chain.invoke({"query": question})

print(result)