from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_community.document_loaders import TextLoader
from langchain_ollama import OllamaEmbeddings
import hashlib,os
from langchain_community.vectorstores import FAISS

from langchain_text_splitters import CharacterTextSplitter

# 使用更强的SHA-256编码器
def sha256_encoder(text: str) -> str:
    """Return the hex SHA-256 digest of *text* (UTF-8 encoded).

    Used as the cache key encoder so cached embeddings are addressed by a
    collision-resistant digest of their source text.
    """
    digest = hashlib.sha256()
    digest.update(text.encode("utf-8"))
    return digest.hexdigest()

# Embedding model served by an Ollama instance.
# NOTE(review): base_url points at a LAN host — confirm it is reachable in
# the target environment.
embeddings_model = OllamaEmbeddings(
        base_url='http://192.168.2.208:11434',
        model="nomic-embed-text")

# Store the embedding cache in a "cache" directory next to this script
# instead of a hard-coded, user-specific absolute Windows path, so the
# script runs unchanged on any machine/user account.
_cache_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "cache")
store = LocalFileStore(_cache_dir)

# Wrap the embedder so repeated texts are served from the on-disk cache;
# cache keys are SHA-256 digests of the text (see sha256_encoder above).
cached_embedder = CacheBackedEmbeddings.from_bytes_store(
    embeddings_model, store,
    key_encoder=sha256_encoder,
)
# Resolve the data file relative to this script so the path works no matter
# what the current working directory is.
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, "state_of_the_union.txt")
raw_documents = TextLoader(file_path, encoding="utf-8").load()

# Split on spaces into overlapping chunks; the 200-char overlap keeps text
# that straddles a chunk boundary retrievable from either side.
text_splitter = CharacterTextSplitter(
    separator=" ",
    chunk_size=400,
    chunk_overlap=200,
    length_function=len,
    is_separator_regex=False,
)
documents = text_splitter.split_documents(raw_documents)

# Build the FAISS index through the caching embedder: the first run embeds
# every chunk over the network; later runs reuse the file cache.
db = FAISS.from_documents(documents, cached_embedder)
# Print a few cache keys as a sanity check that embeddings were cached.
print(list(store.yield_keys())[:5])

# Run a sample similarity search against the index and show the top hit.
query = "where did i see the power of hope"
matches = db.similarity_search(query)
print(len(matches))
print(matches[0].page_content)
