"""
如何创建和查询向量存储
"""
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter

from models import get_ollama_embeddings_client

# Path to the source document (relative to this script's working directory).
file_path = "../data/document/公司管理制度.txt"


def main() -> None:
    """Build an in-memory Chroma vector store from one text file and run
    two equivalent similarity searches: by query string and by raw vector.
    """
    # Load the file; TextLoader.load() returns a list of Document objects
    # (a single Document for a plain text file).
    raw_documents = TextLoader(file_path, encoding="utf-8").load()

    # Regex separators matching section headings, so splits land on heading
    # boundaries instead of arbitrary character positions.
    separators = [
        r"\n[一二三四五六七八九十]、",  # Chinese-numeral headings, e.g. "一、"
        r"\n\d+、",                     # Arabic-numeral headings, e.g. "1、"
        r"\n[A-Za-z]、",                # Latin-letter headings, e.g. "A、"
    ]

    # NOTE(review): only heading regexes are supplied — the recursive
    # splitter has no character-level fallback (default is
    # ["\n\n", "\n", " ", ""]), so a section longer than chunk_size that
    # contains no matching heading cannot be split further. Confirm that
    # heading-only splitting is intentional.
    text_splitter = RecursiveCharacterTextSplitter(
        separators=separators,
        chunk_size=200,
        chunk_overlap=0,
        keep_separator=True,      # keep the heading text inside each chunk
        is_separator_regex=True,  # interpret separators as regex patterns
    )

    split_docs = text_splitter.split_documents(raw_documents)
    print(f"拆分后的文档数量: {len(split_docs)}")

    # Embed the chunks and build an in-memory (non-persistent) Chroma store.
    embeddings_client = get_ollama_embeddings_client()
    db = Chroma.from_documents(split_docs, embeddings_client)

    # Search 1: pass the query string; the store embeds it internally.
    docs = db.similarity_search("试用与转正", k=1)
    print(docs)

    # Search 2: embed the query ourselves, then search by the raw vector —
    # demonstrates the vector-based API yields the same kind of result.
    vector = embeddings_client.embed_query("试用与转正")
    docs_1 = db.similarity_search_by_vector(vector, k=1)
    print(docs_1)


# Guard the entry point so importing this module does not trigger file I/O
# and embedding/network calls as a side effect.
if __name__ == "__main__":
    main()