"""
向量存储使用示例
展示如何使用 RAG Pipeline 处理文档并进行搜索
"""
import os
import sys
from pathlib import Path

# 添加项目根目录到 Python 路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from app.config import get_settings
from utils.rag_pipeline import create_rag_pipeline_from_config


def example_1_basic_usage():
    """Example 1: basic usage — process a single file and search it."""
    divider = "=" * 60
    print("\n" + divider)
    print("示例 1: 基本使用 - 处理单个文件并搜索")
    print(divider)

    # Mirror the relevant application settings into a plain dict for the
    # pipeline factory; chunking parameters are fixed for this example.
    settings = get_settings()
    config = {
        name: getattr(settings, name)
        for name in (
            "MILVUS_HOST",
            "MILVUS_PORT",
            "MILVUS_USER",
            "MILVUS_PASSWORD",
            "MILVUS_COLLECTION_NAME",
            "EMBEDDING_MODEL",
            "EMBEDDING_DIMENSION",
            "OPENAI_API_KEY",
            "OPENAI_API_BASE",
        )
    }
    config.update({"CHUNK_SIZE": 1000, "CHUNK_OVERLAP": 100, "ENCODING": "utf-8"})

    pipeline = create_rag_pipeline_from_config(config)

    # Step 1: (re)create the Milvus collection from scratch.
    print("\n1. 创建 Milvus 集合...")
    pipeline.vector_store.create_collection(drop_if_exists=True)
    print("✓ 集合创建成功")

    # Step 2: process a file (uncomment and point at a real document to run).
    # file_path = "path/to/your/document.txt"
    # print(f"\n2. 处理文件: {file_path}")
    # result = pipeline.process_and_store_file(file_path)
    # print(f"✓ 文件处理成功: 生成 {result['chunk_count']} 个文档块")

    # Step 3: run a similarity search (uncomment to try).
    # query = "你的查询文本"
    # print(f"\n3. 搜索查询: {query}")
    # search_results = pipeline.search(query, top_k=5)
    # print(f"✓ 搜索完成，返回 {len(search_results)} 个结果")

    # Print each hit's score and a short text preview.
    # for i, result in enumerate(search_results, 1):
    #     print(f"\n结果 {i}:")
    #     print(f"  - 相似度分数: {result['score']:.4f}")
    #     print(f"  - 文本预览: {result['text'][:100]}...")

    # Step 4: report collection statistics.
    print("\n4. 获取统计信息...")
    stats = pipeline.get_stats()
    print(f"✓ 集合统计: {stats}")


def example_2_batch_processing():
    """Example 2: process several files in one batch call."""
    divider = "=" * 60
    print("\n" + divider)
    print("示例 2: 批量处理多个文件")
    print(divider)

    # Copy only the settings the pipeline factory needs into a plain dict.
    settings = get_settings()
    config = {
        name: getattr(settings, name)
        for name in (
            "MILVUS_HOST",
            "MILVUS_PORT",
            "MILVUS_COLLECTION_NAME",
            "EMBEDDING_MODEL",
            "EMBEDDING_DIMENSION",
            "OPENAI_API_KEY",
        )
    }

    pipeline = create_rag_pipeline_from_config(config)

    # Batch-process a list of files (uncomment and point at real paths to run).
    # file_paths = [
    #     "path/to/file1.txt",
    #     "path/to/file2.pdf",
    #     "path/to/file3.docx",
    # ]
    # print(f"\n处理 {len(file_paths)} 个文件...")
    # results = pipeline.process_and_store_files(file_paths)
    #
    # for result in results:
    #     if result['success']:
    #         print(f"✓ {result['file_path']}: {result['chunk_count']} 个块")
    #     else:
    #         print(f"✗ {result['file_path']}: {result['error']}")

def example_3_directory_processing():
    """Example 3: process every supported document in a directory."""
    divider = "=" * 60
    print("\n" + divider)
    print("示例 3: 处理整个目录")
    print(divider)

    # Copy only the settings the pipeline factory needs into a plain dict.
    settings = get_settings()
    config = {
        name: getattr(settings, name)
        for name in (
            "MILVUS_HOST",
            "MILVUS_PORT",
            "MILVUS_COLLECTION_NAME",
            "EMBEDDING_MODEL",
            "EMBEDDING_DIMENSION",
            "OPENAI_API_KEY",
        )
    }

    pipeline = create_rag_pipeline_from_config(config)

    # Walk a directory and ingest all matching documents
    # (uncomment and point at a real directory to run).
    # directory_path = "path/to/documents"
    # print(f"\n处理目录: {directory_path}")
    # results = pipeline.process_and_store_directory(
    #     directory_path,
    #     file_extensions=['.txt', '.pdf', '.docx']
    # )
    #
    # success_count = sum(1 for r in results if r['success'])
    # print(f"✓ 成功处理 {success_count}/{len(results)} 个文件")


def example_4_search_and_rag():
    """Example 4: search and assemble a RAG context for an LLM prompt."""
    divider = "=" * 60
    print("\n" + divider)
    print("示例 4: 搜索并获取 RAG 上下文")
    print(divider)

    # Copy only the settings the pipeline factory needs into a plain dict.
    settings = get_settings()
    config = {
        name: getattr(settings, name)
        for name in (
            "MILVUS_HOST",
            "MILVUS_PORT",
            "MILVUS_COLLECTION_NAME",
            "EMBEDDING_MODEL",
            "EMBEDDING_DIMENSION",
            "OPENAI_API_KEY",
        )
    }

    pipeline = create_rag_pipeline_from_config(config)

    # Build the retrieval context for a query (uncomment to try).
    # query = "你的查询文本"
    # print(f"\n查询: {query}")
    # context = pipeline.get_context_for_query(query, top_k=3)
    # print(f"\n上下文内容:\n{context[:500]}...")
    #
    # # The context can be passed straight to an LLM for generation:
    # # prompt = f"根据以下上下文回答问题:\n\n{context}\n\n问题: {query}\n\n回答:"


def example_5_direct_vector_store():
    """Example 5: use the vector store API directly, without the pipeline.

    Demonstrates creating a collection, (optionally) inserting and searching
    documents, and reading collection statistics via ``VectorStoreHandler``.
    The connection is always released via ``try/finally``.
    """
    print("\n" + "="*60)
    print("示例 5: 直接使用向量存储 API")
    print("="*60)

    # Local imports keep these heavier dependencies off module import time.
    from utils.vector_store_handler import VectorStoreHandler
    from langchain_core.documents import Document  # used by the commented insert demo below

    settings = get_settings()

    # Construct the vector store straight from application settings.
    vector_store = VectorStoreHandler(
        host=settings.MILVUS_HOST,
        port=settings.MILVUS_PORT,
        collection_name=settings.MILVUS_COLLECTION_NAME,
        embedding_model=settings.EMBEDDING_MODEL,
        embedding_dimension=settings.EMBEDDING_DIMENSION,
        openai_api_key=settings.OPENAI_API_KEY,
    )

    # BUG FIX: the original called close() only at the end of the function,
    # leaking the Milvus connection when any operation above it raised.
    # try/finally guarantees the connection is released on every path.
    try:
        print("\n1. 创建集合...")
        vector_store.create_collection(drop_if_exists=True)
        print("✓ 集合创建成功")

        # Insert documents (uncomment to run).
        # documents = [
        #     Document(page_content="这是第一个文档", metadata={"source": "doc1"}),
        #     Document(page_content="这是第二个文档", metadata={"source": "doc2"}),
        # ]
        # print(f"\n2. 插入 {len(documents)} 个文档...")
        # ids = vector_store.insert_documents(documents)
        # print(f"✓ 插入成功，ID: {ids}")

        # Similarity search (uncomment to run).
        # print("\n3. 执行搜索...")
        # results = vector_store.search_similar("查询文本", top_k=5)
        # for result in results:
        #     print(f"  - 分数: {result['score']:.4f}, 文本: {result['text'][:50]}...")

        # Report collection statistics.
        print("\n4. 获取统计信息...")
        stats = vector_store.get_collection_stats()
        print(f"✓ 统计信息: {stats}")
    finally:
        # Always release the Milvus connection, even if an operation failed.
        vector_store.close()


def main():
    """Entry point: verify configuration, then run the enabled examples."""
    divider = "=" * 60
    print("\n" + divider)
    print("RAG 向量存储使用示例")
    print(divider)

    try:
        settings = get_settings()

        # Guard clause: bail out early if the one mandatory secret is missing.
        if not settings.OPENAI_API_KEY:
            print("\n⚠ 警告: OPENAI_API_KEY 未配置")
            print("请在 .env 文件中设置 OPENAI_API_KEY")
            return

        # Echo the effective configuration so runs are easy to diagnose.
        print("\n配置检查通过 ✓")
        print(f"  - Milvus 地址: {settings.MILVUS_HOST}:{settings.MILVUS_PORT}")
        print(f"  - 集合名称: {settings.MILVUS_COLLECTION_NAME}")
        print(f"  - Embedding 模型: {settings.EMBEDDING_MODEL}")
        print(f"  - 向量维度: {settings.EMBEDDING_DIMENSION}")

        # Enable additional examples by uncommenting them.
        # example_1_basic_usage()
        # example_2_batch_processing()
        # example_3_directory_processing()
        # example_4_search_and_rag()
        example_5_direct_vector_store()

    except Exception as exc:
        # Top-level boundary for this demo script: report and dump the trace.
        print(f"\n错误: {exc}")
        import traceback
        traceback.print_exc()


# Run the demo suite only when executed as a script, not on import.
if __name__ == "__main__":
    main()

