#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import os
import sys
import traceback
from collections import Counter
from datetime import datetime
from pathlib import Path

sys.path.append('src')

from processors.docx_processor import DOCXProcessor
from core.chunking_engine import ChunkingEngine
from core.text_cleaner import TextCleaner

def test_all_chunking_strategies():
    """Exercise every configured chunking strategy on the sample DOCX file.

    Loads the test document, extracts its text and structure, cleans the
    text, runs each strategy through ChunkingEngine, analyzes the resulting
    chunks, and writes per-strategy JSON/text dumps plus an overall JSON and
    Markdown report into a timestamped output directory.

    Returns:
        dict: the aggregated test results, or None when the test file is
        missing or document content extraction fails.
    """
    print("=== 全面切片策略测试 ===")

    # Sample document that every strategy is run against.
    test_file = 'data/面向水库风险监测的低成本低功耗边缘智能系统.docx'
    test_path = Path(test_file)

    if not test_path.exists():
        print(f"❌ 测试文件不存在: {test_file}")
        return

    print(f"📖 读取文档: {test_file}")
    file_content = test_path.read_bytes()

    # Extract raw text plus structural metadata (paragraphs, headings, ...).
    processor = DOCXProcessor()
    doc_result = processor.extract_content(file_content)

    if not doc_result:
        print("❌ 文档内容提取失败")
        return

    raw_text = doc_result['text']
    structure = doc_result.get('structure', {})

    print(f"📄 文档信息:")
    print(f"   - 原始文本长度: {len(raw_text)} 字符")
    print(f"   - 段落数: {len(structure.get('paragraphs', []))}")
    print(f"   - 标题数: {len(structure.get('headings', []))}")

    # Normalize the text before chunking; formatting is preserved so that
    # structure-aware strategies still see paragraph boundaries.
    cleaner = TextCleaner({
        'clean_unicode': True,
        'remove_special_chars': False,
        'fix_line_breaks': True,
        'remove_headers_footers': False,
        'remove_duplicates': True,
        'remove_empty_lines': True,
        'min_line_length': 3,
        'preserve_formatting': True
    })

    cleaned_text = cleaner.clean_text(raw_text)
    print(f"🧹 清洗后文本长度: {len(cleaned_text)} 字符")

    strategies = _build_strategy_configs()

    # Timestamped directory keeps the outputs of separate runs side by side.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    output_dir = Path(f'output/comprehensive_test_{timestamp}')
    output_dir.mkdir(parents=True, exist_ok=True)

    test_results = {
        'test_info': {
            'timestamp': timestamp,
            'test_file': test_file,
            'original_text_length': len(raw_text),
            'cleaned_text_length': len(cleaned_text),
            'document_structure': {
                'paragraphs': len(structure.get('paragraphs', [])),
                'headings': len(structure.get('headings', [])),
                'tables': len(structure.get('tables', [])),
                'lists': len(structure.get('lists', []))
            }
        },
        'strategy_results': {}
    }

    # Run every strategy; a failure in one must not abort the whole sweep.
    for strategy_name, config in strategies.items():
        print(f"\n🔪 测试策略: {strategy_name}")
        print(f"   配置: {config}")

        try:
            test_results['strategy_results'][strategy_name] = _run_single_strategy(
                strategy_name, config, cleaned_text, structure, output_dir)
        except Exception as e:
            print(f"   ❌ 策略测试失败: {str(e)}")
            traceback.print_exc()

            test_results['strategy_results'][strategy_name] = {
                'error': str(e),
                'config': config
            }

    # Overall machine-readable report.
    report_file = output_dir / 'comprehensive_test_report.json'
    with open(report_file, 'w', encoding='utf-8') as f:
        json.dump(test_results, f, ensure_ascii=False, indent=2)

    # Human-readable summary alongside the JSON.
    generate_markdown_report(test_results, output_dir)

    print(f"\n📊 测试完成！")
    print(f"📁 详细结果保存在: {output_dir}")
    print(f"📄 测试报告: {report_file}")

    return test_results


def _build_strategy_configs():
    """Return the mapping of strategy display name -> ChunkingEngine config."""
    return {
        '固定长度分段': {
            'strategy': '固定长度分段',
            'chunk_size': 1000,
            'overlap_size': 100,
            'respect_boundaries': True
        },
        '按内容类型分段': {
            'strategy': '按内容类型分段',
            'content_types': ['headings', 'paragraphs'],
            'merge_short_chunks': True,
            'min_chunk_size': 100
        },
        '按文档结构分段': {
            'strategy': '按文档结构分段',
            'structure_levels': ['section', 'subsection'],
            'include_headers': True,
            'max_section_size': 2000
        },
        '父子结构分段': {
            'strategy': '父子结构分段',
            'max_depth': 3,
            'parent_context': True,
            'context_size': 200
        },
        '按指定符号分段': {
            'strategy': '按指定符号分段',
            'delimiters': ['\n\n', '。', '！', '？'],
            'delimiter_priority': 'first_match',
            'keep_delimiter': False
        },
        '题目结构分段': {
            'strategy': '题目结构分段',
            'question_pattern': r'\d+[.、]',
            'preserve_structure': True
        }
    }


def _run_single_strategy(strategy_name, config, cleaned_text, structure, output_dir):
    """Chunk the cleaned text with one strategy and persist its outputs.

    Writes the full chunk list as JSON and a human-readable text dump into
    output_dir, then returns the summary dict stored in the overall results.
    Exceptions from ChunkingEngine propagate so the caller can record them.
    """
    engine = ChunkingEngine(config)
    chunks = engine.chunk_document(cleaned_text, structure)

    print(f"   ✅ 切片完成: {len(chunks)} 个切片")

    chunk_analysis = analyze_chunks(chunks)

    # Full chunk dump as JSON for later inspection.
    strategy_file = output_dir / f'{strategy_name}_详细结果.json'
    with open(strategy_file, 'w', encoding='utf-8') as f:
        json.dump({
            'strategy': strategy_name,
            'config': config,
            'chunks': chunks,
            'analysis': chunk_analysis
        }, f, ensure_ascii=False, indent=2)

    _write_chunks_text_file(output_dir / f'{strategy_name}_切片内容.txt',
                            strategy_name, config, chunks)

    print(f"   📁 结果已保存到: {strategy_file}")

    return {
        'config': config,
        'chunk_count': len(chunks),
        'analysis': chunk_analysis,
        'chunks': chunks[:10]  # keep only the first 10 chunks in the summary
    }


def _write_chunks_text_file(text_file, strategy_name, config, chunks):
    """Dump every chunk's metadata and content to a human-readable text file."""
    with open(text_file, 'w', encoding='utf-8') as f:
        f.write(f"切片策略: {strategy_name}\n")
        f.write(f"配置: {config}\n")
        f.write(f"切片数量: {len(chunks)}\n")
        f.write("=" * 50 + "\n\n")

        for i, chunk in enumerate(chunks):
            f.write(f"=== 切片 {i+1} ===\n")
            f.write(f"ID: {chunk['chunk_id']}\n")
            f.write(f"类型: {chunk['metadata'].get('chunk_type', 'unknown')}\n")
            f.write(f"字符数: {chunk['metadata'].get('char_count', 0)}\n")
            f.write(f"词数: {chunk['metadata'].get('word_count', 0)}\n")
            if 'title' in chunk['metadata']:
                f.write(f"标题: {chunk['metadata']['title']}\n")
            f.write(f"内容:\n{chunk['content']}\n\n")

def analyze_chunks(chunks):
    """Compute summary statistics for a list of chunks.

    Args:
        chunks: list of dicts, each with a 'content' string and a 'metadata'
            dict (optional keys: 'char_count', 'chunk_type').

    Returns:
        dict with the chunk total, length statistics, a content-uniqueness
        ratio, per-type counts, and a coarse length distribution.
    """
    if not chunks:
        return {
            'total_chunks': 0,
            'avg_length': 0,
            'min_length': 0,
            'max_length': 0,
            'unique_content_ratio': 0
        }

    # Prefer the precomputed char_count; fall back to the actual content length.
    lengths = [chunk['metadata'].get('char_count', len(chunk['content']))
               for chunk in chunks]

    # Ratio of distinct (whitespace-stripped) contents to total chunks.
    # Comparing the strings themselves (not hash()) avoids undercounting
    # on hash collisions; 1.0 means no duplicate chunks were produced.
    unique_contents = {chunk['content'].strip() for chunk in chunks}
    unique_ratio = len(unique_contents) / len(chunks)

    # Count chunks per declared type; untyped chunks fall under 'unknown'.
    chunk_types = dict(Counter(
        chunk['metadata'].get('chunk_type', 'unknown') for chunk in chunks))

    return {
        'total_chunks': len(chunks),
        'avg_length': sum(lengths) / len(lengths),
        'min_length': min(lengths),
        'max_length': max(lengths),
        'unique_content_ratio': unique_ratio,
        'chunk_types': chunk_types,
        'length_distribution': {
            'short_chunks': sum(1 for length in lengths if length < 200),
            'medium_chunks': sum(1 for length in lengths if 200 <= length < 1000),
            'long_chunks': sum(1 for length in lengths if length >= 1000)
        }
    }

def generate_markdown_report(test_results, output_dir):
    """Write a Markdown summary of the chunking-strategy test results.

    Args:
        test_results: aggregated results dict containing 'test_info' and
            'strategy_results'.
        output_dir: Path of the directory that receives the report file.
    """
    lines = []
    out = lines.append

    out("# 切片策略全面测试报告\n\n")

    # --- Test metadata ---
    info = test_results['test_info']
    out("## 测试信息\n\n")
    out(f"- **测试时间**: {info['timestamp']}\n")
    out(f"- **测试文件**: {info['test_file']}\n")
    out(f"- **原始文本长度**: {info['original_text_length']:,} 字符\n")
    out(f"- **清洗后文本长度**: {info['cleaned_text_length']:,} 字符\n")

    doc_struct = info['document_structure']
    out(f"- **文档结构**:\n")
    out(f"  - 段落数: {doc_struct['paragraphs']}\n")
    out(f"  - 标题数: {doc_struct['headings']}\n")
    out(f"  - 表格数: {doc_struct['tables']}\n")
    out(f"  - 列表数: {doc_struct['lists']}\n\n")

    # --- Side-by-side comparison table ---
    out("## 策略结果对比\n\n")
    out("| 策略名称 | 切片数量 | 平均长度 | 唯一性比例 | 状态 |\n")
    out("|---------|---------|---------|-----------|------|\n")

    for name, outcome in test_results['strategy_results'].items():
        if 'error' in outcome:
            out(f"| {name} | - | - | - | ❌ 失败 |\n")
        else:
            stats = outcome['analysis']
            out(f"| {name} | {stats['total_chunks']} | {stats['avg_length']:.0f} | {stats['unique_content_ratio']:.2%} | ✅ 成功 |\n")

    out("\n")

    # --- Per-strategy detail sections ---
    out("## 详细分析\n\n")

    for name, outcome in test_results['strategy_results'].items():
        out(f"### {name}\n\n")

        if 'error' in outcome:
            out(f"❌ **错误**: {outcome['error']}\n\n")
            continue

        stats = outcome['analysis']

        out(f"**配置参数**:\n")
        for key, value in outcome['config'].items():
            out(f"- {key}: {value}\n")

        out(f"\n**切片统计**:\n")
        out(f"- 总切片数: {stats['total_chunks']}\n")
        out(f"- 平均长度: {stats['avg_length']:.0f} 字符\n")
        out(f"- 最短切片: {stats['min_length']} 字符\n")
        out(f"- 最长切片: {stats['max_length']} 字符\n")
        out(f"- 内容唯一性: {stats['unique_content_ratio']:.2%}\n")

        if stats['chunk_types']:
            out(f"\n**切片类型分布**:\n")
            for type_name, type_count in stats['chunk_types'].items():
                out(f"- {type_name}: {type_count}\n")

        dist = stats['length_distribution']
        out(f"\n**长度分布**:\n")
        out(f"- 短切片 (<200字符): {dist['short_chunks']}\n")
        out(f"- 中等切片 (200-1000字符): {dist['medium_chunks']}\n")
        out(f"- 长切片 (>1000字符): {dist['long_chunks']}\n\n")

    # --- Recommendations: mostly-unique output with more than one chunk ---
    out("## 推荐策略\n\n")

    recommended = [
        (name, outcome['analysis'])
        for name, outcome in test_results['strategy_results'].items()
        if 'error' not in outcome
        and outcome['analysis']['unique_content_ratio'] > 0.8
        and outcome['analysis']['total_chunks'] > 1
    ]

    if recommended:
        out("基于测试结果，推荐以下策略：\n\n")
        for name, stats in recommended:
            out(f"- **{name}**: {stats['total_chunks']} 个切片，唯一性 {stats['unique_content_ratio']:.2%}\n")
    else:
        out("⚠️ 未找到理想的切片策略，建议检查文档结构或调整参数。\n")

    out("\n---\n")
    out(f"*报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*\n")

    # Single buffered write keeps the file operation in one place.
    report_file = output_dir / 'comprehensive_test_report.md'
    with open(report_file, 'w', encoding='utf-8') as f:
        f.writelines(lines)
# Allow running this test script directly from the command line.
if __name__ == '__main__':
    test_all_chunking_strategies()