# 1.获取文档
#
# 2.加载文档
#
# 3.文档切割
#
# 4.向量化处理存入向量数据库
#
# 5.从向量数据库中查询
# 导入所需的模块和类
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import DashScopeEmbeddings

from langchain_text_splitters import CharacterTextSplitter

# Instantiate the embedding model (DashScope reads its API key from the
# DASHSCOPE_API_KEY environment variable).
embeddings = DashScopeEmbeddings()

# Local file store used as a byte cache for computed embeddings, so
# re-running the script does not re-call the embedding API for text
# it has already embedded.
store = LocalFileStore("../home/cache/")

# Wrap the embedder with the cache; namespacing by model name keeps
# caches from different embedding models from colliding.
cached_embedder = CacheBackedEmbeddings.from_bytes_store(embeddings, store, namespace=embeddings.model)

# Load the document and split it into chunks on newlines,
# at most ~200 characters each with no overlap.
doc = TextLoader("../home/doc/NBA新闻.txt", encoding='utf-8').load()
spliter = CharacterTextSplitter("\n", chunk_size=200, chunk_overlap=0)
chunks = spliter.split_documents(doc)

# Build the FAISS vector store from the chunks (embeds each chunk,
# going through the cache first).
db = FAISS.from_documents(chunks, cached_embedder)

# BUG FIX: the original saved the index to 'key' but loaded it from
# '../home/key' — two different locations, so load_local would not find
# the index just written. Use a single shared path for both operations.
# allow_dangerous_deserialization is required because the index metadata
# is pickled; only safe here since we load the file we just wrote.
index_path = '../home/key'
db.save_local(index_path)
db = FAISS.load_local(index_path, cached_embedder, allow_dangerous_deserialization=True)

# Query the vector store for the top-3 most similar chunks.
res = db.similarity_search("NBA冠军球队是哪个", k=3)
print(res)
