from langchain_text_splitters import RecursiveCharacterTextSplitter
from app.core.loaders import load_documents
from app.core.vector_store import get_vector_store
from app.configs.settings import config


def load_and_ingest(data_path: str) -> None:
    """
    Load documents from a path, split them into chunks, and ingest them
    into the vector store.

    Args:
        data_path: Path passed to ``load_documents`` (directory or file,
            depending on that loader's contract).

    Side effects:
        Writes the resulting chunks to the configured vector store and
        prints progress messages to stdout. Returns early (no write) when
        splitting produces no chunks.
    """
    # 1. Load documents
    docs = load_documents(data_path)

    # 2. Split documents using chunk sizing from app config.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=config.text_splitter.chunk_size,
        chunk_overlap=config.text_splitter.chunk_overlap,
    )
    # Split exactly once. The previous version called split_documents a
    # second time further down (leftover debug code), redoing all the work
    # and re-logging the chunk count.
    splits = text_splitter.split_documents(docs)

    if not splits:
        print("No documents to ingest.")
        return

    print(f"Split {len(docs)} documents into {len(splits)} chunks.")

    # 3. Ingest into vector store
    vector_store = get_vector_store()
    print("Ingesting chunks into vector store...")
    vector_store.add_documents(documents=splits)
    print("Ingestion complete.")