# New-style imports (recommended for langchain >= 0.2)
from langchain_community.document_loaders import (
    TextLoader,
    PyPDFLoader,
    Docx2txtLoader,
    UnstructuredExcelLoader,
)
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.storage import LocalFileStore
from langchain.embeddings.cache import CacheBackedEmbeddings
import hashlib

# Embedding model loaded from a local checkpoint directory (no hub download).
local_model_path = r"D:\soft\sentence-transformers-master\bge-large-zh-v1.5"

# Bundle constructor arguments up front; normalize_embeddings=True makes
# inner-product similarity behave like cosine similarity downstream.
_hf_embedding_kwargs = dict(
    model_name=local_model_path,
    model_kwargs=dict(device="cpu"),
    encode_kwargs=dict(normalize_embeddings=True),
)
embeddings = HuggingFaceEmbeddings(**_hf_embedding_kwargs)

# Disk-backed embedding cache: repeated runs skip re-encoding unchanged text.
fs = LocalFileStore("./cache/")


def _cache_key(text: str) -> str:
    """Return a stable sha256 cache key derived from model name + raw text.

    Including the model name keeps cache entries from different embedding
    models from colliding in the same store.
    """
    digest_input = (embeddings.model_name + text).encode("utf-8")
    return hashlib.sha256(digest_input).hexdigest()


cached_embeddings = CacheBackedEmbeddings.from_bytes_store(
    embeddings,
    fs,
    key_encoder=_cache_key,
)



raw_documents = TextLoader(file_path="file/01.md",encoding="utf8").load()
text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=20, length_function=len)
documents = text_splitter.split_documents(raw_documents)
# print(documents)

db = FAISS.from_documents(documents, cached_embeddings)
print(db)