import logging
import asyncio
import nest_asyncio
from llama_index.core import SimpleDirectoryReader

# Import project-local modules
import config
from data_fetcher import fetch_and_save_documents
from pipeline_services import setup_global_settings
from ingestion_pipeline import run_pipeline

# Configure root logging: timestamped, logger-named, leveled records at INFO.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# Apply nest_asyncio so asyncio can be re-entered from within an
# already-running event loop (e.g. Jupyter or nested async frameworks).
nest_asyncio.apply()


async def main():
    """
    Coordinate the full data-ingestion flow.

    Steps:
        1. Fetch documents from Tavily and save them to the local output dir.
        2. Load the saved documents from that directory.
        3. Configure the global LLM and embedding model settings.
        4. Run the async ingestion pipeline over the loaded documents.

    Returns early (after logging the error) if fetching or loading fails.
    """
    logger = logging.getLogger(__name__)
    logger.info("---------- 开始数据摄入流程 ----------")

    # Step 1: fetch data from Tavily and persist it locally.
    # Wrapped in try/except for consistency with the loading step below —
    # a network/API failure should abort the flow gracefully, not crash
    # with a raw traceback.
    try:
        fetch_and_save_documents(
            query=config.SEARCH_QUERY,
            max_results=config.MAX_SEARCH_RESULTS,
            output_dir=config.OUTPUT_DIR,
            api_key=config.TAVILY_API_KEY
        )
    except Exception as e:
        logger.error("获取数据失败: %s", e, exc_info=True)
        return

    # Step 2: load documents from the local directory.
    # Lazy %-style args so formatting is skipped when the level is disabled.
    logger.info("正在从 '%s' 目录加载文档...", config.OUTPUT_DIR)
    try:
        reader = SimpleDirectoryReader(config.OUTPUT_DIR, filename_as_id=True)
        documents = reader.load_data()
    except Exception as e:
        logger.error("加载文档失败: %s", e)
        return
    # Outside the try: len() on the loaded list cannot raise.
    logger.info("成功加载 %d 篇文档。", len(documents))

    # Step 3: set the global LLM and embedding model.
    setup_global_settings()

    # Step 4: run the ingestion pipeline.
    try:
        await run_pipeline(documents)
    except Exception as e:
        logger.critical("摄入流水线在运行时发生严重错误: %s", e, exc_info=True)

    logger.info("---------- 数据摄入流程全部完成 ----------")


if __name__ == "__main__":
    # 使用 asyncio 运行异步主函数
    asyncio.run(main())