#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import os
from datetime import datetime
from src.core.document_processor import DocumentProcessor
from src.core.text_cleaner import TextCleaner

def _analyze_chunks(chunks):
    """Compute quality statistics for a list of chunk dicts.

    Each chunk is expected to be a dict with a 'content' string (missing
    keys are treated as empty strings).

    Returns:
        tuple: (total_chunks, unique_chunks, duplicate_rate_pct, avg_length)
    """
    contents = [chunk.get('content', '') for chunk in chunks]
    total = len(contents)
    unique = len(set(contents))
    # Guard against division by zero when a strategy produced no chunks.
    duplicate_rate = (total - unique) / total * 100 if total > 0 else 0
    avg_length = sum(len(c) for c in contents) / total if total > 0 else 0
    return total, unique, duplicate_rate, avg_length


def _save_results(test_results):
    """Persist test_results as UTF-8 JSON in a timestamped output directory.

    Returns:
        str: path of the written JSON file.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_dir = f"output/comprehensive_test_{timestamp}"
    os.makedirs(output_dir, exist_ok=True)
    result_file = os.path.join(output_dir, "all_strategies_test_result.json")
    with open(result_file, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese strategy names human-readable.
        json.dump(test_results, f, ensure_ascii=False, indent=2)
    return result_file


def _print_summary(test_results):
    """Print a success/failure summary and recommend the best strategy.

    The recommendation score is: unique_count - duplicate_rate / 10
    (more unique chunks is better; duplication is penalized lightly).
    """
    print("\n=== 测试总结 ===")
    strategies = test_results['strategies']
    successful = [name for name, r in strategies.items() if r.get('success', False)]
    failed = [name for name, r in strategies.items() if not r.get('success', False)]

    print(f"成功策略 ({len(successful)}): {', '.join(successful)}")
    if failed:
        print(f"失败策略 ({len(failed)}): {', '.join(failed)}")

    if not successful:
        return

    # Pick the strategy with the highest score.
    best_strategy = None
    best_score = -1
    for name in successful:
        result = strategies[name]
        score = result['unique_count'] - result['duplicate_rate'] / 10
        if score > best_score:
            best_score = score
            best_strategy = name

    if best_strategy:
        best_result = strategies[best_strategy]
        print(f"\n推荐策略: {best_strategy}")
        print(f"  - 切片数: {best_result['chunk_count']}")
        print(f"  - 唯一切片: {best_result['unique_count']}")
        print(f"  - 重复率: {best_result['duplicate_rate']}%")


def test_all_chunking_strategies(
        doc_path=r"e:\trae\dify\data\面向水库风险监测的低成本低功耗边缘智能系统.docx"):
    """Run every chunking strategy against one document and report results.

    For each strategy, the document is cleaned and chunked, chunk-quality
    statistics are computed, results are saved to a timestamped JSON file,
    and a summary (including a recommended strategy) is printed.

    Args:
        doc_path: path of the document to process. Defaults to the
            original hard-coded test document.

    Returns:
        dict: full test results keyed by strategy name.
    """
    # Configuration for every chunking strategy under test.
    strategies = {
        '固定长度分段': {
            'strategy': '固定长度分段',
            'chunk_size': 500,
            'overlap_size': 50
        },
        '按内容类型分段': {
            'strategy': '按内容类型分段'
        },
        '按文档结构分段': {
            'strategy': '按文档结构分段',
            'max_depth': 3,
            'preserve_structure': True,
            'include_headers': True
        },
        '父子结构分段': {
            'strategy': '父子结构分段',
            'max_depth': 3
        },
        '按指定符号分段': {
            'strategy': '按指定符号分段',
            'delimiters': ['\n\n', '\n'],
            'delimiter_priority': 'first_match',
            'keep_delimiter': False
        },
        '题目结构分段': {
            'strategy': '题目结构分段',
            'question_pattern': r'^\d+\.'
        }
    }

    # Text-cleaning configuration shared by all strategies.
    cleaning_config = {
        'remove_headers_footers': True,
        'remove_special_chars': True,
        'remove_duplicates': True,
        'fix_line_breaks': True,
        'remove_empty_lines': True
    }

    test_results = {
        'test_time': datetime.now().isoformat(),
        'document_path': doc_path,
        'strategies': {}
    }

    print("=== 开始测试所有切片策略 ===")
    print(f"测试文档: {doc_path}")
    print(f"测试时间: {test_results['test_time']}")
    print()

    for strategy_name, chunking_config in strategies.items():
        print(f"\n--- 测试策略: {strategy_name} ---")

        try:
            processor = DocumentProcessor(cleaning_config, chunking_config)
            result = processor.process_file(doc_path)

            if not result:
                print(f"策略 {strategy_name} 处理失败")
                test_results['strategies'][strategy_name] = {
                    'success': False,
                    'error': '文档处理失败'
                }
                continue

            chunks = result.get('chunks', [])
            total_chunks, unique_chunks, duplicate_rate, avg_length = \
                _analyze_chunks(chunks)

            test_results['strategies'][strategy_name] = {
                'success': True,
                'chunk_count': total_chunks,
                'unique_count': unique_chunks,
                'duplicate_rate': round(duplicate_rate, 2),
                'avg_chunk_length': round(avg_length, 2),
                'config': chunking_config,
                # Keep the first 3 chunks as samples for manual inspection.
                'sample_chunks': chunks[:3] if chunks else []
            }

            print(f"  ✓ 切片数量: {total_chunks}")
            print(f"  ✓ 唯一切片: {unique_chunks}")
            print(f"  ✓ 重复率: {duplicate_rate:.1f}%")
            print(f"  ✓ 平均长度: {avg_length:.0f} 字符")

            # Preview the first 3 chunks (truncated, newlines flattened).
            if chunks:
                print("  前3个切片预览:")
                for i, chunk in enumerate(chunks[:3]):
                    content = chunk.get('content', '')[:100].replace('\n', ' ')
                    print(f"    切片{i+1}: {content}... ({len(chunk.get('content', ''))} 字符)")

        except Exception as e:
            # Record the failure but keep testing the remaining strategies.
            print(f"  ✗ 策略 {strategy_name} 测试失败: {str(e)}")
            test_results['strategies'][strategy_name] = {
                'success': False,
                'error': str(e)
            }

    result_file = _save_results(test_results)

    print(f"\n=== 测试完成 ===")
    print(f"详细结果已保存到: {result_file}")

    _print_summary(test_results)

    return test_results

# Script entry point: run the full strategy comparison when executed directly.
if __name__ == "__main__":
    test_all_chunking_strategies()