from langchain_community.document_loaders import DirectoryLoader
from major.models_manager import embedding_model
from langchain_chroma import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Separators tried in order of preference when splitting; the CJK punctuation
# entries let the splitter break Chinese text at natural sentence boundaries,
# and the final "" falls back to splitting anywhere.
_SEPARATORS = [
    "\n\n",
    "\n",
    " ",
    ".",
    ",",
    "\u200b",  # Zero-width space
    "\uff0c",  # Fullwidth comma
    "\u3001",  # Ideographic comma
    "\uff0e",  # Fullwidth full stop
    "\u3002",  # Ideographic full stop
    "",
]

# Deliberately tiny chunks (100 chars, 20 overlap) — demo-sized, not tuned
# for production retrieval quality.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,
    chunk_overlap=20,
    length_function=len,
    is_separator_regex=False,
    separators=_SEPARATORS,
)

# 1. Load every document found under the ./docs directory.
loader = DirectoryLoader("docs")
docs = loader.load()

# 2. Split the loaded documents into chunks.
texts = text_splitter.split_documents(docs)
# NOTE: split_documents() is for inputs that are already LangChain Document
# objects; for raw strings, create_documents() would be the right call.

print(f"texts数量: {len(texts)}")

# 3. Build the vector store: embed every chunk and persist it to disk.
vectorstore = Chroma.from_documents(
    texts,
    embedding=embedding_model.get_model(),
    # persist_directory APPENDS to an existing store rather than replacing
    # it — re-running this script will insert duplicate copies of the docs.
    persist_directory="./chroma_db"
)






# Retrieve the 3 chunks most similar to the query
# (query text: "What is the attention mechanism?").
results = vectorstore.similarity_search("注意力机制是什么", k=3)


for doc in results:
    print(f"内容: {doc.page_content}")
    # .get() avoids a KeyError for documents whose loader did not attach a
    # "source" metadata entry (DirectoryLoader normally sets it, but other
    # loaders sharing this store may not).
    print(f"来源: {doc.metadata.get('source', 'unknown')}")
    print("-" * 20)