"""
RAG系统调试脚本

专门用于诊断和验证RAG系统各个环节的工作状态
帮助定位检索失败的具体原因

作者：调试助手
"""

import sys
import os
import time

# 添加src目录到Python路径
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))

from src.config import config
from src.document_processor import PDFProcessor, TextCleaner, TextSplitter
from src.embeddings import EmbeddingGenerator, VectorStore
from src.retriever import RetrievalEngine
from src.rag_engine import RAGEngine

def debug_step_1_pdf_extraction():
    """Step 1: exercise PDF text extraction and report what came out.

    Returns:
        The raw extracted text on success, or None if extraction failed.
    """
    print("=" * 60)
    print("🔍 步骤1：调试PDF文本提取")
    print("=" * 60)

    try:
        processor = PDFProcessor()
        print(f"📁 PDF文件路径: {processor.pdf_path}")

        # Dump PDF metadata first so file-level problems surface early.
        pdf_info = processor.get_pdf_info()
        print(f"📋 PDF信息:")
        for info_key, info_value in pdf_info.items():
            print(f"   {info_key}: {info_value}")

        print("\n📄 正在提取PDF文本...")
        raw_text = processor.extract_text()

        print(f"✅ 文本提取成功!")
        print(f"📏 原始文本长度: {len(raw_text)} 字符")
        print(f"📝 文本前500字符预览:")
        print("-" * 40)
        print(raw_text[:500])
        print("-" * 40)

        # Probe for expected keywords so later retrieval failures can be
        # traced back to extraction quality.
        print(f"\n🔍 关键词检查:")
        for keyword in ("翁和平", "金钟人", "族姓赞"):
            if keyword in raw_text:
                print(f"   ✅ 找到关键词: {keyword}")
            else:
                print(f"   ❌ 未找到关键词: {keyword}")

        return raw_text

    except Exception as e:
        print(f"❌ PDF提取失败: {str(e)}")
        return None

def debug_step_2_text_cleaning(raw_text):
    """Step 2: run the text cleaner and compare before/after lengths.

    Args:
        raw_text: raw text from step 1, or None/empty if it failed.

    Returns:
        The cleaned text, or None when there was nothing to clean or
        cleaning raised.
    """
    print("\n" + "=" * 60)
    print("🧹 步骤2：调试文本清洗")
    print("=" * 60)

    # Nothing to do if extraction failed upstream.
    if not raw_text:
        print("❌ 没有原始文本，跳过清洗步骤")
        return None

    try:
        cleaned_text = TextCleaner().clean_text(raw_text)
    except Exception as e:
        print(f"❌ 文本清洗失败: {str(e)}")
        return None

    print(f"✅ 文本清洗完成!")
    print(f"📏 清洗前长度: {len(raw_text)} 字符")
    print(f"📏 清洗后长度: {len(cleaned_text)} 字符")
    print(f"📝 清洗后文本前500字符预览:")
    print("-" * 40)
    print(cleaned_text[:500])
    print("-" * 40)

    return cleaned_text

def debug_step_3_text_splitting(cleaned_text):
    """Step 3: split cleaned text into chunks and preview each one.

    Args:
        cleaned_text: output of step 2, or None/empty if it failed.

    Returns:
        The list of text chunks, or None on skip/failure.
    """
    print("\n" + "=" * 60)
    print("✂️ 步骤3：调试文本分割")
    print("=" * 60)

    # Skip when the previous step produced nothing.
    if not cleaned_text:
        print("❌ 没有清洗后的文本，跳过分割步骤")
        return None

    try:
        text_chunks = TextSplitter().split_text(cleaned_text)

        print(f"✅ 文本分割完成!")
        print(f"📊 配置: 块大小={config.CHUNK_SIZE}, 重叠={config.CHUNK_OVERLAP}")
        print(f"📦 生成文本块数量: {len(text_chunks)}")

        # Show a short preview of every chunk for manual inspection.
        print(f"\n📝 各文本块预览:")
        for chunk_no, chunk in enumerate(text_chunks, start=1):
            print(f"   📦 块 {chunk_no} (长度: {len(chunk)} 字符):")
            print(f"      {chunk[:100]}...")
            print()

        return text_chunks

    except Exception as e:
        print(f"❌ 文本分割失败: {str(e)}")
        return None

def debug_step_4_vectorization(text_chunks):
    """Step 4: embed the text chunks and sanity-check query embedding.

    Args:
        text_chunks: list of chunk strings from step 3, or None.

    Returns:
        Tuple of (embeddings, embedding_generator); (None, None) on any
        failure or skip, so the caller's tuple-unpacking never raises.
    """
    print("\n" + "=" * 60)
    print("🔢 步骤4：调试向量化")
    print("=" * 60)

    if not text_chunks:
        print("❌ 没有文本块，跳过向量化步骤")
        # BUG FIX: previously returned a single None here, which made the
        # caller's `embeddings, generator = ...` unpacking raise TypeError.
        return None, None

    try:
        print("🔄 正在初始化向量化模型...")
        embedding_generator = EmbeddingGenerator()

        print(f"✅ 向量化模型加载成功!")
        print(f"📏 向量维度: {embedding_generator.get_embedding_dimension()}")

        print(f"\n🔄 正在向量化 {len(text_chunks)} 个文本块...")
        embeddings = embedding_generator.encode_texts(text_chunks)

        print(f"✅ 向量化完成!")
        print(f"📊 生成向量数量: {len(embeddings)}")

        # Round-trip a single query to confirm query-side encoding works
        # and the dimension matches the document embeddings.
        test_query = "翁和平的诗文名称"
        print(f"\n🧪 测试查询向量化: '{test_query}'")
        query_embedding = embedding_generator.encode_text(test_query)
        print(f"✅ 查询向量化成功，维度: {len(query_embedding)}")

        return embeddings, embedding_generator

    except Exception as e:
        print(f"❌ 向量化失败: {str(e)}")
        return None, None

def debug_step_5_retrieval(text_chunks, embeddings, embedding_generator):
    """Step 5: build a vector store and probe it with test queries.

    Args:
        text_chunks: chunk strings from step 3 (may be None on failure).
        embeddings: document embeddings from step 4 (may be None).
        embedding_generator: generator from step 4 used to encode queries.

    Returns:
        The populated VectorStore on success, otherwise None.
    """
    print("\n" + "=" * 60)
    print("🔍 步骤5：调试检索功能")
    print("=" * 60)

    # BUG FIX: the original guard used `not embeddings`, which raises
    # ValueError ("truth value is ambiguous") when embeddings is a
    # multi-element numpy array — and the guard sat outside the try, so
    # the whole script crashed. Test identity/length explicitly instead.
    if not text_chunks or embeddings is None or len(embeddings) == 0:
        print("❌ 缺少必要数据，跳过检索步骤")
        return

    try:
        # Build the vector store with minimal per-chunk metadata.
        vector_store = VectorStore()
        metadata = [{'chunk_id': i, 'source': 'pdf'} for i in range(len(text_chunks))]
        vector_store.add_documents(text_chunks, embeddings, metadata)

        print(f"✅ 向量存储创建成功!")
        print(f"📊 存储的文档数量: {vector_store.get_document_count()}")

        # Probe with queries of varying specificity.
        test_queries = [
            "翁和平的两首诗文分别叫什么名字",
            "金钟人",
            "族姓赞", 
            "翁和平诗文",
            "诗文名称"
        ]

        for query in test_queries:
            print(f"\n🔍 测试查询: '{query}'")

            query_embedding = embedding_generator.encode_text(query)

            # Sweep thresholds to see where results start dropping out.
            thresholds = [0.3, 0.5, 0.7]
            for threshold in thresholds:
                results = vector_store.search(
                    query_embedding, 
                    top_k=5, 
                    similarity_threshold=threshold
                )

                print(f"   📊 阈值 {threshold}: 找到 {len(results)} 个结果")

                if results:
                    for i, result in enumerate(results[:2]):  # 只显示前2个
                        print(f"      🎯 结果 {i+1}: 相似度={result['similarity']:.3f}")
                        print(f"         内容: {result['content'][:80]}...")

        return vector_store

    except Exception as e:
        print(f"❌ 检索测试失败: {str(e)}")
        return None

def debug_step_6_full_rag_test():
    """Step 6: end-to-end RAG check — init from PDF, then one Q&A round.

    Temporarily lowers the global similarity threshold to 0.3 for the
    test question and always restores it afterwards.
    """
    print("\n" + "=" * 60)
    print("⚙️ 步骤6：调试完整RAG流程")
    print("=" * 60)

    try:
        rag_engine = RAGEngine()

        print("🔄 正在初始化RAG引擎...")
        init_result = rag_engine.initialize_from_pdf()

        if init_result['success']:
            print("✅ RAG引擎初始化成功!")
            stats = init_result['statistics']
            print(f"📊 统计信息:")
            for key, value in stats.items():
                print(f"   {key}: {value}")
        else:
            print(f"❌ RAG引擎初始化失败: {init_result.get('message')}")
            return

        test_question = "翁和平的两首诗文分别叫什么名字，不用输出诗文内容"
        print(f"\n❓ 测试问题: '{test_question}'")

        # Temporarily lower the similarity threshold for this probe.
        original_threshold = config.SIMILARITY_THRESHOLD
        config.SIMILARITY_THRESHOLD = 0.3  # 临时降低阈值
        try:
            result = rag_engine.ask_question(test_question)
        finally:
            # BUG FIX: restore even when ask_question raises; previously an
            # exception left the global threshold stuck at 0.3 (the outer
            # except swallows it, so later code ran with the wrong config).
            config.SIMILARITY_THRESHOLD = original_threshold

        if result['success']:
            print(f"✅ 问答成功!")
            print(f"🤖 回答: {result['answer']}")
            print(f"📊 使用模式: {result.get('mode', '未知')}")

            if 'retrieval_results' in result:
                retrieval_results = result['retrieval_results']
                print(f"🔍 检索到 {len(retrieval_results)} 个相关文档")
                for i, doc in enumerate(retrieval_results[:2]):
                    print(f"   📄 文档 {i+1}: 相似度={doc.get('similarity', 0):.3f}")
                    print(f"      内容: {doc.get('content', '')[:100]}...")
        else:
            print(f"❌ 问答失败: {result.get('message')}")

    except Exception as e:
        print(f"❌ 完整RAG测试失败: {str(e)}")

def main():
    """Run every debug step in order, then print tuning suggestions."""
    print("🚀 RAG系统调试开始...")

    # Echo the active configuration so runs are self-describing.
    print(f"📋 当前配置:")
    config_rows = (
        ("PDF文件", config.PDF_PATH),
        ("块大小", config.CHUNK_SIZE),
        ("重叠大小", config.CHUNK_OVERLAP),
        ("TOP_K", config.TOP_K),
        ("相似度阈值", config.SIMILARITY_THRESHOLD),
    )
    for label, value in config_rows:
        print(f"   {label}: {value}")

    # Pipeline stages, each feeding the next; failures propagate as None.
    raw_text = debug_step_1_pdf_extraction()
    cleaned_text = debug_step_2_text_cleaning(raw_text)
    text_chunks = debug_step_3_text_splitting(cleaned_text)
    embeddings, embedding_generator = debug_step_4_vectorization(text_chunks)
    debug_step_5_retrieval(text_chunks, embeddings, embedding_generator)
    debug_step_6_full_rag_test()

    print("\n" + "=" * 60)
    print("🎉 RAG系统调试完成!")
    print("=" * 60)

    print("\n💡 建议:")
    suggestions = (
        "1. 如果检索结果少，尝试降低相似度阈值 (当前: 0.7 -> 建议: 0.3-0.5)",
        "2. 如果文本块太少，考虑减小块大小",
        "3. 如果关键词匹配失败，检查PDF文本提取是否正确",
    )
    for suggestion in suggestions:
        print(suggestion)

if __name__ == "__main__":
    main()