import logging
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import TitleExtractor, SummaryExtractor
from llama_index.core.schema import Document
from llamaindex_ingestion_project import config, pipeline_services

logger = logging.getLogger(__name__)


async def run_pipeline(documents: list[Document]) -> None:
    """Build and asynchronously run the ingestion pipeline.

    The pipeline splits documents into sentence-based chunks, extracts
    titles and summaries, and persists results via the stores returned by
    ``pipeline_services.initialize_stores()``. The docstore enables
    deduplication of already-ingested documents and the cache avoids
    re-running transformations on unchanged inputs.

    Args:
        documents (list[Document]): Documents loaded from local files.
            An empty list short-circuits with a warning and no work done.

    Returns:
        None
    """
    # Guard clause: nothing to ingest — warn and exit early.
    if not documents:
        logger.warning("文档列表为空，跳过摄入流程。")
        return

    # Stores are initialized together so docstore / vector store / cache
    # share consistent configuration (see pipeline_services).
    docstore, vector_store, ingest_cache = pipeline_services.initialize_stores()

    logger.info("正在构建 Ingestion Pipeline...")
    pipeline = IngestionPipeline(
        transformations=[
            # Chunk first, then run metadata extractors on the chunks.
            SentenceSplitter(chunk_size=config.CHUNK_SIZE, chunk_overlap=config.CHUNK_OVERLAP),
            TitleExtractor(),
            SummaryExtractor(),
        ],
        docstore=docstore,
        vector_store=vector_store,
        cache=ingest_cache,
    )

    # Use lazy %-style args so the message is only formatted if the
    # INFO level is enabled (avoids eager f-string formatting).
    logger.info("开始使用 %s 个工作进程运行 Ingestion Pipeline...", config.NUM_WORKERS)
    await pipeline.arun(documents=documents, num_workers=config.NUM_WORKERS)
    logger.info("Ingestion Pipeline 运行完成。")