"""
针对9页PDF的RAG系统调试脚本

专门调试data/weng_page401-409.pdf文件的RAG处理流程
重点检查分块、检索和问答逻辑

作者：调试助手
"""

import sys
import os
import time

# 添加src目录到Python路径
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))

from src.config import config
from src.document_processor import PDFProcessor, TextCleaner, TextSplitter
from src.embeddings import EmbeddingGenerator, VectorStore
from src.retriever import RetrievalEngine
from src.rag_engine import RAGEngine

def step1_check_pdf_file():
    """Step 1: verify the configured PDF exists and show the current settings.

    Returns:
        True when the file at ``config.PDF_PATH`` exists, False otherwise.
    """
    print("=" * 60)
    print("📋 步骤1：检查PDF文件和配置")
    print("=" * 60)

    print(f"📁 配置的PDF路径: {config.PDF_PATH}")
    print(f"📊 当前文本块配置: 大小={config.CHUNK_SIZE}, 重叠={config.CHUNK_OVERLAP}")
    print(f"🔍 相似度阈值: {config.SIMILARITY_THRESHOLD}")

    # Guard clause: bail out early when the file is missing.
    pdf_path = config.PDF_PATH
    if not os.path.exists(pdf_path):
        print(f"❌ PDF文件不存在: {pdf_path}")
        return False

    size_mb = os.path.getsize(pdf_path) / (1024 * 1024)
    print(f"✅ PDF文件存在，大小: {size_mb:.1f} MB")
    return True

def step2_extract_and_analyze_text():
    """Step 2: extract the PDF text and analyze its content in depth.

    Locates the key poem-related keywords, finds probable title lines, and
    prints text previews for manual inspection.

    Returns:
        (raw_text, keyword_positions, title_lines) on success, or
        (None, None, None) when extraction fails.
    """
    print("\n" + "=" * 60)
    print("📄 步骤2：提取并深度分析PDF文本")
    print("=" * 60)

    try:
        processor = PDFProcessor()

        # Basic metadata about the document.
        info = processor.get_pdf_info()
        print(f"📋 PDF基本信息:")
        print(f"   页数: {info.get('page_count', '未知')}")
        print(f"   文件大小: {info.get('file_size', 0)} 字节")

        print(f"\n📄 正在提取PDF文本...")
        raw_text = processor.extract_text()

        print(f"✅ 文本提取成功!")
        print(f"📏 原始文本长度: {len(raw_text)} 字符")

        print(f"\n🔍 深度内容分析:")

        # Record every occurrence of each keyword (overlapping hits included,
        # since the search resumes one character past the previous hit).
        keyword_positions = {}
        for keyword in ["翁和平", "金钟人", "族姓赞", "诗文选载"]:
            positions = []
            idx = raw_text.find(keyword)
            while idx != -1:
                positions.append(idx)
                idx = raw_text.find(keyword, idx + 1)
            keyword_positions[keyword] = positions
            if positions:
                print(f"   ✅ 找到 '{keyword}': {len(positions)} 次，位置: {positions}")
            else:
                print(f"   ❌ 未找到 '{keyword}'")

        lines = raw_text.split('\n')
        print(f"\n📝 文本结构分析:")
        print(f"   总行数: {len(lines)}")

        # A short line (<= 20 chars) mentioning a poem name is treated as a
        # probable title line.
        title_lines = []
        for line_no, raw_line in enumerate(lines):
            stripped = raw_line.strip()
            if len(stripped) <= 20:
                if '金钟人' in stripped:
                    title_lines.append((line_no, '《金钟人》', stripped))
                if '族姓赞' in stripped:
                    title_lines.append((line_no, '《族姓赞》', stripped))

        print(f"   找到标题行:")
        for line_no, title, content in title_lines:
            print(f"      行 {line_no}: {title} -> '{content}'")

        # Preview the head of the text for a quick sanity check.
        print(f"\n📝 文本前500字符预览:")
        print(f"{'='*40}")
        print(raw_text[:500])
        print(f"{'='*40}")

        # Preview the middle section too, but only for longer documents.
        if len(raw_text) > 1000:
            print(f"\n📝 文本中段500字符预览:")
            print(f"{'='*40}")
            middle = len(raw_text) // 2
            print(raw_text[middle:middle + 500])
            print(f"{'='*40}")

        return raw_text, keyword_positions, title_lines

    except Exception as e:
        print(f"❌ PDF文本提取失败: {str(e)}")
        return None, None, None

def step3_test_text_chunking(raw_text):
    """Step 3: benchmark several chunking configurations and pick one.

    A configuration is acceptable when at least one chunk mentions each of
    the two poem titles; among acceptable configurations, the one producing
    the most chunks wins and is written back into the global ``config``.

    Args:
        raw_text: The raw extracted PDF text (may be None/empty).

    Returns:
        The chosen list of chunks, a default-split list when no candidate
        qualifies, or None when input is missing or splitting fails.
    """
    print("\n" + "=" * 60)
    print("✂️ 步骤3：测试文本分块策略")
    print("=" * 60)

    if not raw_text:
        print("❌ 没有原始文本，跳过分块测试")
        return None

    try:
        cleaned_text = TextCleaner().clean_text(raw_text)
        print(f"📏 清洗后文本长度: {len(cleaned_text)} 字符")

        # (chunk_size, overlap) candidates, ordered from large to small.
        candidate_configs = [(300, 50), (200, 30), (150, 25), (100, 20)]

        best_config = None
        best_chunks = None

        for size, overlap in candidate_configs:
            print(f"\n🧪 测试配置: 块大小={size}, 重叠={overlap}")

            chunks = TextSplitter(chunk_size=size, chunk_overlap=overlap).split_text(cleaned_text)
            print(f"   📦 生成块数量: {len(chunks)}")

            # Count chunks that mention each poem title (bools add as 0/1).
            jinzhongren_count = 0
            zuxingzan_count = 0
            for idx, chunk in enumerate(chunks):
                found_jzr = '金钟人' in chunk
                found_zxz = '族姓赞' in chunk
                jinzhongren_count += found_jzr
                zuxingzan_count += found_zxz
                if found_jzr or found_zxz:
                    print(f"      📦 块 {idx+1}: 长度={len(chunk)}, 金钟人={found_jzr}, 族姓赞={found_zxz}")
                    print(f"         内容预览: {chunk[:80]}...")

            print(f"   📊 包含《金钟人》的块: {jinzhongren_count}")
            print(f"   📊 包含《族姓赞》的块: {zuxingzan_count}")

            # Keep this configuration if both titles are covered and it
            # yields more chunks than the current best.
            if jinzhongren_count > 0 and zuxingzan_count > 0:
                if best_chunks is None or len(chunks) > len(best_chunks):
                    best_config = (size, overlap)
                    best_chunks = chunks

        if best_config:
            print(f"\n✅ 推荐配置: 块大小={best_config[0]}, 重叠={best_config[1]}")
            print(f"   生成 {len(best_chunks)} 个块，两个关键词都有覆盖")

            # Persist the winning configuration for the later pipeline steps.
            config.CHUNK_SIZE, config.CHUNK_OVERLAP = best_config
            return best_chunks

        print(f"\n⚠️ 未找到理想配置，使用默认配置")
        return TextSplitter().split_text(cleaned_text)

    except Exception as e:
        print(f"❌ 文本分块测试失败: {str(e)}")
        return None

def step4_test_vectorization_and_retrieval(text_chunks):
    """Step 4: embed the chunks, build a vector store, and probe retrieval.

    Runs a set of query phrasings at several similarity thresholds and
    tracks the query whose results cover both poem titles at once.

    Args:
        text_chunks: List of text chunks from step 3 (may be None/empty).

    Returns:
        (vector_store, embedding_generator, best_query, best_results) on
        success; (None, None, None, None) when input is missing or any
        stage fails, so callers can always unpack four values.
    """
    print("\n" + "=" * 60)
    print("🔢 步骤4：测试向量化和检索")
    print("=" * 60)

    if not text_chunks:
        print("❌ 没有文本块，跳过向量化测试")
        # BUG FIX: previously returned a single None here, which broke the
        # four-value tuple unpacking in main() with a TypeError.
        return None, None, None, None

    try:
        print(f"🔄 正在初始化向量化模型...")
        embedding_generator = EmbeddingGenerator()
        print(f"✅ 向量化模型加载成功，维度: {embedding_generator.get_embedding_dimension()}")

        print(f"\n🔄 正在向量化 {len(text_chunks)} 个文本块...")
        embeddings = embedding_generator.encode_texts(text_chunks)
        print(f"✅ 向量化完成，生成 {len(embeddings)} 个向量")

        # Build the vector store with minimal per-chunk metadata.
        vector_store = VectorStore()
        metadata = [{'chunk_id': i, 'source': 'pdf'} for i in range(len(text_chunks))]
        vector_store.add_documents(text_chunks, embeddings, metadata)
        print(f"✅ 向量存储创建成功，包含 {vector_store.get_document_count()} 个文档")

        # Different phrasings of the same information need, to compare recall.
        test_queries = [
            "翁和平的诗文名称",
            "翁和平的两首诗分别叫什么名字",
            "金钟人",
            "族姓赞",
            "诗文标题",
            "翁和平作品标题",
            "两首诗的名字",
        ]

        print(f"\n🔍 测试检索性能:")

        best_query = None
        best_results = []

        for query in test_queries:
            print(f"\n   🔍 查询: '{query}'")

            query_embedding = embedding_generator.encode_text(query)

            # Sweep thresholds from permissive to strict.
            for threshold in (0.1, 0.2, 0.3):
                results = vector_store.search(
                    query_embedding,
                    top_k=5,
                    similarity_threshold=threshold
                )

                if results:
                    print(f"      阈值 {threshold}: 找到 {len(results)} 个结果")

                    # Quality check: do the results cover both poem titles?
                    has_jinzhongren = any('金钟人' in r.get('content', '') for r in results)
                    has_zuxingzan = any('族姓赞' in r.get('content', '') for r in results)

                    if has_jinzhongren and has_zuxingzan:
                        print(f"         🎉 同时找到两首诗的信息！")

                        # Track the query with the largest full-coverage
                        # result set.  (Removed the dead `has_both` flag that
                        # was assigned but never read.)
                        if len(results) > len(best_results):
                            best_query = query
                            best_results = results

                    # Show a preview of the top two hits.
                    for i, result in enumerate(results[:2]):
                        similarity = result.get('similarity', 0)
                        content = result.get('content', '')[:100]
                        print(f"         结果 {i+1}: 相似度={similarity:.3f}, 内容: {content}...")
                else:
                    print(f"      阈值 {threshold}: 无结果")

        if best_results:
            print(f"\n✅ 最佳查询: '{best_query}', 找到 {len(best_results)} 个高质量结果")

        return vector_store, embedding_generator, best_query, best_results

    except Exception as e:
        print(f"❌ 向量化和检索测试失败: {str(e)}")
        return None, None, None, None

def step5_test_full_rag_pipeline():
    """Step 5: run the complete end-to-end RAG pipeline.

    Initializes the engine from the configured PDF, then asks several
    phrasings of the same question until one answer names both poems.

    Returns:
        True as soon as an answer contains both 《金钟人》 and 《族姓赞》;
        False otherwise (including initialization or runtime failures).
    """
    print("\n" + "=" * 60)
    print("⚙️ 步骤5：测试完整RAG流程")
    print("=" * 60)

    try:
        print("🔄 正在初始化RAG引擎...")
        rag_engine = RAGEngine()

        print("🔄 正在从PDF初始化系统...")
        init_result = rag_engine.initialize_from_pdf()

        if not init_result['success']:
            print(f"❌ RAG引擎初始化失败: {init_result.get('message')}")
            return False

        print("✅ RAG引擎初始化成功!")
        stats = init_result['statistics']
        print(f"📊 处理统计:")
        for key, value in stats.items():
            print(f"   {key}: {value}")

        # Different phrasings of the target question.
        test_questions = [
            "翁和平的两首诗文分别叫什么名字，不用输出诗文内容",
            "请列出翁和平的诗文标题",
            "翁和平写了哪些诗？请只说标题名称",
            "文档中提到的翁和平的作品有哪些？",
            "金钟人和族姓赞是什么？",
        ]

        print(f"\n❓ 测试不同问题表述:")

        for i, question in enumerate(test_questions):
            print(f"\n🧪 测试 {i+1}: '{question}'")

            # Temporarily lower the similarity threshold so retrieval is not
            # starved of context.  BUG FIX: restore it in a finally block —
            # previously an exception from ask_question left the process-wide
            # threshold stuck at 0.1.
            original_threshold = config.SIMILARITY_THRESHOLD
            config.SIMILARITY_THRESHOLD = 0.1
            try:
                start_time = time.time()
                result = rag_engine.ask_question(question)
                elapsed = time.time() - start_time
            finally:
                config.SIMILARITY_THRESHOLD = original_threshold

            if result['success']:
                answer = result['answer']
                print(f"✅ 回答成功 (耗时: {elapsed:.2f}秒)")
                print(f"🤖 回答: {answer}")

                # CJK characters have no case, so check containment directly
                # (the previous .lower() call had no effect on the result).
                has_jinzhongren = '金钟人' in answer
                has_zuxingzan = '族姓赞' in answer

                print(f"📊 答案质量检查:")
                print(f"   包含《金钟人》: {has_jinzhongren}")
                print(f"   包含《族姓赞》: {has_zuxingzan}")

                if has_jinzhongren and has_zuxingzan:
                    print(f"   🎉 完美答案！同时包含两首诗的标题")
                    return True
                elif has_jinzhongren or has_zuxingzan:
                    print(f"   ⚠️ 部分正确，但遗漏了一首诗")
                else:
                    print(f"   ❌ 答案不包含任何诗文标题")
            else:
                print(f"❌ 回答失败: {result.get('message')}")

        return False

    except Exception as e:
        print(f"❌ 完整RAG流程测试失败: {str(e)}")
        return False

def main():
    """Run the complete debugging workflow, step by step."""
    print("🚀 新PDF文件RAG系统全面调试")
    print("🎯 目标：确保能正确识别《金钟人》和《族姓赞》")
    print()

    # Step 1: abort immediately when the PDF cannot be found.
    if not step1_check_pdf_file():
        return

    # Step 2: extract the text and collect keyword/title diagnostics.
    raw_text, keyword_positions, title_lines = step2_extract_and_analyze_text()

    # Step 3: probe chunking configurations.
    text_chunks = step3_test_text_chunking(raw_text)

    # Step 4: probe embedding and retrieval quality.
    vector_store, embedding_generator, best_query, best_results = step4_test_vectorization_and_retrieval(text_chunks)

    # Step 5: exercise the full question-answering pipeline.
    success = step5_test_full_rag_pipeline()

    print("\n" + "=" * 60)
    print("🎉 调试完成!")
    print("=" * 60)

    if success:
        print("✅ RAG系统工作正常，能够正确识别两首诗的标题！")
        print("\n🎯 建议使用以下问题进行测试：")
        print("   '翁和平的两首诗文分别叫什么名字，不用输出诗文内容'")
    else:
        print("❌ RAG系统仍存在问题，请检查以下方面：")
        print("1. PDF文本提取是否包含完整内容")
        print("2. 文本分块是否合理")
        print("3. 检索阈值是否过高")
        print("4. LLM提示词是否需要优化")

    print(f"\n📋 最终配置建议:")
    print(f"   块大小: {config.CHUNK_SIZE}")
    print(f"   重叠大小: {config.CHUNK_OVERLAP}")
    print(f"   相似度阈值: {config.SIMILARITY_THRESHOLD}")

# Run the full debugging workflow when executed as a script.
if __name__ == "__main__":
    main() 