
from major.models_manager import embedding_model
from langchain_chroma import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
import os


# Separators are tried coarsest-first; full-width CJK punctuation is
# included so Chinese sentences break at natural boundaries.
_SEPARATORS = [
    "\n\n",
    "\n",
    " ",
    ".",
    ",",
    "\u200b",  # Zero-width space
    "\uff0c",  # Fullwidth comma
    "\u3001",  # Ideographic comma
    "\uff0e",  # Fullwidth full stop
    "\u3002",  # Ideographic full stop
    "",        # Last resort: split between any two characters
]

# Splitter for mixed English/Chinese text.  chunk_size is deliberately
# tiny (demo-scale); the overlap carries context across chunk borders.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,
    is_separator_regex=False,
    separators=_SEPARATORS,
)

# 1. Load documents: read every regular file under "docs" and wrap each
#    one in a LangChain Document so it can be split and embedded below.
docs = []
for file_name in os.listdir("docs"):
    path = os.path.join("docs", file_name)
    # Skip subdirectories and other non-regular entries, which would
    # otherwise crash open() with IsADirectoryError.
    if not os.path.isfile(path):
        continue
    with open(path, "r", encoding="utf-8") as f:
        text = f.read()
    # splitext keeps interior dots ("a.b.txt" -> "a.b"), unlike the
    # naive split(".")[0] which would truncate such names.
    name = os.path.splitext(file_name)[0]
    # Build the Document directly instead of an intermediate dict pass.
    docs.append(Document(page_content=text, metadata={"name": name}))

# 2. Split the documents into chunks.
texts = text_splitter.split_documents(docs)
# If the input is already a list of LangChain Document objects,
# call split_documents directly; for raw strings you would use
# create_documents instead.

print(f"texts数量: {len(texts)}")

# 3. Build the vector store: embed every chunk and persist it to disk.
# NOTE: persisting into an existing directory ADDS to the store rather
# than overwriting it.
vectorstore = Chroma.from_documents(
    documents=texts,
    embedding=embedding_model.get_model(),
    persist_directory="./chroma_db",
)






