from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader


# Load the source document from disk.
# BUGFIX: original path was "./tate_of_the_union.txt" — a typo for the
# standard LangChain example file "state_of_the_union.txt". Corrected here;
# confirm the actual file name on disk.
# encoding="utf-8" is passed explicitly so the load does not depend on the
# platform's default locale (the script itself contains non-ASCII text).
loader = TextLoader("./state_of_the_union.txt", encoding="utf-8")
documents = loader.load()

# Split the document into chunks of at most 1000 characters, no overlap.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)

print(docs)

# Embed each chunk with OpenAI and build an in-memory FAISS vector index.
# Requires the OPENAI_API_KEY environment variable to be set.
# NOTE(review): these imports come from the legacy `langchain.*` namespace;
# newer releases expose them via `langchain_community` / `langchain_openai`.
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)

# Query text (Chinese): "What are the basic ingredients of beer?"
query = "啤酒的基本原料是什么？"

docs = db.similarity_search(query)

# Guard against an empty result set: indexing docs[0] directly would raise
# IndexError if the search returns no matches.
if docs:
    print(docs[0].page_content)
else:
    print("No matching documents found.")

# Persist the index to ./faiss_index so it can be reloaded later with
# FAISS.load_local("faiss_index", embeddings).
db.save_local("faiss_index")