from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
import __init__
# Embedding-vector cache: wrap the OpenAI embedder so that every computed
# embedding is persisted to a local file store and reused on later runs.
u_embeddings = OpenAIEmbeddings()

# File-backed key/value store holding the cached embedding bytes.
fs = LocalFileStore("../kecheng源码/cache/")

# Namespacing by model name keeps caches from different embedding models
# (e.g. after a model upgrade) from colliding in the same store.
cached_embeddings = CacheBackedEmbeddings.from_bytes_store(
    underlying_embeddings=u_embeddings,
    document_embedding_cache=fs,
    namespace=u_embeddings.model,
)

# Show what is already cached before doing any work (empty on a fresh run).
print(list(fs.yield_keys()))

# Load the document, split it into chunks, embed the chunks, and store
# the resulting vectors in the cache.
raw_documents = TextLoader("../kecheng源码/letter.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=600, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
print(documents)

# FIX: the embedding step below was previously only present as a
# commented-out IPython `%timeit` line building a FAISS index, so the
# script never actually wrote anything to the cache and the second
# yield_keys() print was identical to the first. Embedding the chunk
# texts through the cache-backed wrapper persists them into `fs`.
cached_embeddings.embed_documents([doc.page_content for doc in documents])

# Optional timing experiment (requires the `faiss` package; `%timeit`
# only works inside IPython/Jupyter):
# from langchain.vectorstores import FAISS
# %timeit -r 1 -n 1 db= FAISS.from_documents(documents, cached_embeddings)

# Inspect the cache keys — should now contain one entry per chunk.
print(list(fs.yield_keys()))