import os

# Disable HuggingFace tokenizers parallelism — presumably to silence the
# "tokenizers parallelism" fork warnings/deadlocks when the process forks
# after tokenizer use (TODO confirm the embeddings backend is HF-based).
os.environ["TOKENIZERS_PARALLELISM"] = "false"

from config.config import Config
from src.document_loader import DocumentLoader
from src.text_splitter import TextSplitter
from src.embeddings import CustomEmbeddings
from src.vector_store import VectorStore
from src.rag_chain import RAGChain

def main():
    """Build (or load) the vector store and answer one sample RAG query.

    On first run, loads documents from the configured directory, splits
    them into chunks, embeds them, and persists the vector store. On
    subsequent runs the persisted store is loaded directly — document
    loading and splitting are skipped entirely (the original always
    loaded and split, then discarded the work on the warm path).
    Finally runs one test question through the RAG chain and prints the
    answer plus source snippets.
    """
    config = Config()

    # Sanity-check the documents directory before doing any work.
    print(f"Documents directory: {config.DOCUMENTS_DIR}")
    print(f"Directory exists: {config.DOCUMENTS_DIR.exists()}")

    # Embeddings are needed both to create and to load the store.
    embeddings = CustomEmbeddings().get_embeddings()
    vector_store = VectorStore(embeddings)

    if config.VECTOR_DB_PATH.exists():
        # Warm start: reuse the persisted index; no need to re-load
        # or re-split the source documents.
        vs = vector_store.load_vector_store(config.VECTOR_DB_PATH)
    else:
        # Cold start: load, split, embed, and persist the documents.
        loader = DocumentLoader(config.DOCUMENTS_DIR)
        documents = loader.load_documents()

        splitter = TextSplitter(config.CHUNK_SIZE, config.CHUNK_OVERLAP)
        splits = splitter.split_documents(documents)

        # Debug info: how much material went into the index.
        print(f"Number of documents loaded: {len(documents)}")
        print(f"Number of splits: {len(splits)}")

        vs = vector_store.create_vector_store(splits, config.VECTOR_DB_PATH)

    # Assemble the RAG chain over the ready vector store.
    rag_chain = RAGChain(
        api_key=config.DEEPSEEK_API_KEY,
        api_base=config.DEEPSEEK_API_BASE,
        vector_store=vs,
        top_k=config.TOP_K,
    )

    # Smoke-test query (user-facing strings kept in Chinese by design).
    question = "什么是RAG？"
    result = rag_chain.query(question)
    print(f"问题: {question}")
    print(f"答案: {result['result']}")
    print("\n相关文档片段:")
    for doc in result['source_documents']:
        print(f"- {doc.page_content[:200]}...")

# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main() 